#!/usr/bin/python
# portable.py
#
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et sts=4 ai:
import ConfigParser
import subprocess
import sys
import threading
import gobject
import pygtk
pygtk.require('2.0')
import gst
import gtk
import gtk.glade
from twisted.internet import error, defer, reactor
from twisted.python import log as twistedlog
from flumotion.admin import admin
from flumotion.common import log
from flumotion.common import connection
from flumotion.common import componentui
from flumotion.common.planet import moods
from flumotion.monitor.nagios import util
from flumotion.twisted import pb
import inhibitor
import portable_platform
CONFIG = ConfigParser.ConfigParser()
CONFIG.read(['config.ini'])
FAKE=CONFIG.getboolean('config', 'fake')
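#
# The config.ini read above is assumed to look roughly like this (section
# and option names come from the CONFIG.get*() calls in this file; the
# values are illustrative only):
#
#   [config]
#   fake = false
#
#   [firewire]
#   guid = 0x0000000000000000
#
#   [camera]
#   device = /dev/video0
#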
class PortableXML(object):
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file("portable.glade")
def get_page(self, name):
window = self.builder.get_object(name)
child = window.get_children()[0]
window.remove(child)
return child
def get_object(self, *args, **kw):
return self.builder.get_object(*args, **kw)
class SetUpPage(object):
interval = 1000
def __init__(self, assistant, xml):
self.assistant = assistant
self.xml = xml
if not hasattr(self, 'page') or self.page is None:
self.page = xml.get_page(self.xmlname)
self.index = assistant.append_page(self.page)
assistant.set_page_title(self.page, self.title)
assistant.set_page_type(self.page, gtk.ASSISTANT_PAGE_CONTENT)
assistant.set_page_complete(self.page, False)
self.timer = None
assistant.connect("prepare", self.on_prepare)
def on_prepare(self, assistant, page):
if page == self.page:
self.timer = gobject.timeout_add(self.interval, self.update)
self.on_show()
else:
self.on_unshow()
def update(self):
pass
def on_unshow(self):
self.timer = None
def on_show(self):
self.update()
def focus_forward(self):
def callback(widget, label):
if hasattr(widget, 'get_children'):
for child in widget.get_children():
if hasattr(child, 'get_label') and child.get_label() == label:
child.grab_focus()
self.assistant.forall(callback, 'gtk-go-forward')
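# A minimal sketch of a page subclass (illustrative only, never
# instantiated; "example" is a hypothetical glade window name). The real
# pages below follow the same pattern: supply xmlname/title and an update()
# that returns True for as long as its timer should keep firing.
class ExamplePage(SetUpPage):
    xmlname = "example"
    title = "Example Setup"
    def update(self, evt=None):
        if self.timer is not None:
            self.assistant.set_page_complete(self.page, True)
            return True
        return False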
class BatteryPage(SetUpPage):
xmlname = "battery"
title = "Power Setup"
def update(self, evt=None):
if self.timer is not None:
pic = self.xml.get_object('battery-pic')
text = self.xml.get_object('battery-instructions')
if portable_platform.get_ac_status() or FAKE:
pic.set_from_file('img/photos/power-connected.jpg')
text.set_label('Power has been connected, yay!')
self.assistant.set_page_complete(self.page, True)
self.focus_forward()
else:
pic.set_from_file('img/photos/power-disconnected.jpg')
text.set_label('Please connect the power cable.')
self.assistant.set_page_complete(self.page, False)
return True
return False
class NetworkPage(SetUpPage):
xmlname = "network"
title = "Network Setup"
def update(self, evt=None):
if self.timer is not None:
self.check_network()
return True
return False
def check_network(self):
if not getattr(self, 'checking', False):
self.checking = True
def callback(self=self):
import gobject
if portable_platform.get_network_status() or FAKE:
gobject.idle_add(self.network_connected)
else:
gobject.idle_add(self.network_disconnected)
self.checking = False
t = threading.Thread(target=callback)
t.start()
def network_connected(self):
text = self.xml.get_object('network-instructions')
text.set_label('')
self.assistant.set_page_complete(self.page, True)
self.focus_forward()
def network_disconnected(self):
text = self.xml.get_object('network-instructions')
text.set_label('Please turn on the Telstra Next-G Elite Device. (Power button is shown below.)')
self.assistant.set_page_complete(self.page, False)
class VideoPage(SetUpPage):
def __init__(self, *args, **kw):
self.player = None
SetUpPage.__init__(self, *args, **kw)
video = self.xml.get_object(self.video_component)
video.unset_flags(gtk.DOUBLE_BUFFERED)
video.connect("expose-event", self.on_expose)
video.connect("map-event", self.on_map)
video.connect("unmap-event", self.on_unshow)
def update(self, evt=None):
if self.timer is not None:
if self.player is not None:
if (gst.STATE_CHANGE_SUCCESS, gst.STATE_PLAYING) == self.player.get_state(timeout=1)[:-1] or FAKE:
self.assistant.set_page_complete(self.page, True)
self.focus_forward()
return True
self.assistant.set_page_complete(self.page, False)
return True
return False
def on_expose(self, *args):
video = self.xml.get_object(self.video_component)
# Force the window to be realized
video.window.xid
def on_map(self, *args):
video = self.xml.get_object(self.video_component)
self.video_xid = video.window.xid
self.player = gst.parse_launch(self.video_pipeline)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect("message", self.on_message)
bus.connect("sync-message::element", self.on_sync_message)
self.player.set_state(gst.STATE_PLAYING)
def on_unshow(self, *args):
SetUpPage.on_unshow(self)
if self.player is not None:
self.player.set_state(gst.STATE_NULL)
while (gst.STATE_CHANGE_SUCCESS, gst.STATE_NULL) != self.player.get_state()[:-1]:
pass
sys.stdout.flush()
self.player = None
def on_message(self, bus, message):
print message
t = message.type
if t == gst.MESSAGE_EOS:
print "End of stream!?"
self.player.set_state(gst.STATE_NULL)
elif t == gst.MESSAGE_ERROR:
err, debug = message.parse_error()
print "Error: %s" % err, debug
self.player.set_state(gst.STATE_NULL)
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
if message_name == "prepare-xwindow-id":
message.src.set_property("force-aspect-ratio", True)
message.src.set_xwindow_id(self.video_xid)
class PresentationPage(VideoPage):
xmlname = "presentation"
title = "Presentation Capture Setup"
# FIXME: Need to keep this in sync with producer-firewire in flumotion-config/collector-portable.xml
video_pipeline = """\
dv1394src guid=%s !
queue leaky=2 max-size-time=1000000000 !
dvdemux name=demux !
dvdec !
videoscale !
autovideosink
""" % CONFIG.get('firewire', 'guid')
video_component = 'presentation-preview'
class CameraPage(VideoPage):
xmlname = "camera"
title = "Presenter Capture Setup"
# FIXME: Need to keep this in sync with composite-video in flumotion-config/collector-portable.xml
video_pipeline = """\
v4l2src device=%s !
image/jpeg,width=640,height=480,framerate=(fraction)24/1 !
jpegdec !
videocrop left=80 right=80 top=0 bottom=0 !
videoscale !
autovideosink
""" % CONFIG.get('camera', 'device')
video_component = 'camera-preview'
class AudioPage(SetUpPage):
def __init__(self, flumotion, *args):
self.flumotion = flumotion
SetUpPage.__init__(self, *args)
self.volume_monitor = None
def on_show(self):
d = self.flumotion.load()
def loaded(*args):
self.update()
d.addCallback(loaded)
def update(self, evt=None):
if self.timer is None:
return False
self.set_state()
return True
def set_state(self):
def planet_callback(uistate, self=self):
component = util.findComponent(uistate, '/default/producer-firewire')
if self.volume_monitor is None:
import volume_monitor
parent_widget = self.xml.get_object(self.box)
self.volume_monitor = volume_monitor.VolumeMonitor(self.flumotion.medium, component, force_channels=1)
volume_widget = self.volume_monitor.widget
old_parent = volume_widget.get_parent()
if old_parent:
old_parent.remove(volume_widget)
parent_widget.pack_start(volume_widget)
if moods.get(component.get('mood')) is not moods.happy:
return
def component_callback(firewire_uistate, self=self):
if self.volume_monitor.state is not None:
return
self.volume_monitor.setUIState(firewire_uistate)
self.assistant.set_page_complete(self.page, True)
self.focus_forward()
d = self.flumotion.medium.componentCallRemote(component, 'getUIState')
d.addCallback(component_callback)
d = self.flumotion.get_planet_state()
d.addCallback(planet_callback)
class AudioInRoomPage(AudioPage):
xmlname = "audio-inroom"
title = "Inroom Audio Setup"
box = "audio-inroom-box"
class AudioStandAlonePage(AudioPage):
xmlname = "audio-standalone"
title = "Stand Alone Audio Setup"
box = "audio-standalone-box"
class SuccessPage(AudioPage, VideoPage):
xmlname = "preview"
title = "Success - Video streaming!"
box = "audio-preview-box"
video_pipeline = """\
playbin2 uri=http://localhost:8800/output
"""
video_component = 'video-preview'
def __init__(self, flumotion, assistant, xml):
AudioPage.__init__(self, flumotion, assistant, xml)
VideoPage.__init__(self, assistant, xml)
def on_show(self):
return AudioPage.on_show(self)
def update(self):
return AudioPage.update(self) or VideoPage.update(self)
def on_unshow(self):
return AudioPage.on_unshow(self)
class FlumotionConnection(object):
def __init__(self):
log.init()
self.medium = admin.AdminModel()
i = connection.PBConnectionInfo(
"127.0.0.1", 7531, True, pb.Authenticator(username='user', password='test'))
d = self.medium.connectToManager(i)
d.addCallback(self.connected)
d.addErrback(twistedlog.err)
def connected(self, *args):
print "Connected to flumotion."
d = self.medium.cleanComponents()
def loaded(*args):
print "Configuration cleared."
d.addCallback(loaded)
d.addErrback(twistedlog.err)
return d
def get_planet_state(self):
d = self.medium.callRemote('getPlanetState')
d.addErrback(twistedlog.err)
return d
def load(self):
config = open('../flumotion-config/collector-portable.xml').read()
camera_device = CONFIG.get('camera', 'device')
firewire_guid = CONFIG.get('firewire', 'guid')
d = self.medium.loadConfiguration(config % locals())
def loaded(*args):
print "Full configuration loaded."
d.addCallback(loaded)
d.addErrback(twistedlog.err)
return d
class App(object):
def delete_event(self, widget, event, data=None):
return False
def destroy(self, widget, data=None):
gtk.main_quit()
def __init__(self):
subprocess.call('sudo /etc/init.d/flumotion restart', shell=True)
subprocess.call('xset s off', shell=True)
subprocess.call('xset -dpms', shell=True)
# Stop the screensaver and screen blanking
self.inhibitor = inhibitor.Inhibitor()
self.inhibitor.inhibit(reason="Video streaming!")
flumotion = FlumotionConnection()
xml = PortableXML()
self.xml = xml
assistant = gtk.Assistant()
self.assistant = assistant
battery = BatteryPage(assistant, xml)
network = NetworkPage(assistant, xml)
presentation = PresentationPage(assistant, xml)
camera = CameraPage(assistant, xml)
audio_inroom = AudioInRoomPage(flumotion, assistant, xml)
#audio_standalone = AudioStandAlonePage(flumotion, assistant, xml)
#interaction = xml.get_page("interaction")
#assistant.append_page(interaction)
#assistant.set_page_title(interaction, "Interaction Setup")
#assistant.set_page_type(interaction, gtk.ASSISTANT_PAGE_CONTENT)
#assistant.set_page_complete(interaction, False)
success = SuccessPage(flumotion, assistant, xml)
# and the window
assistant.show_all()
assistant.connect("close", gtk.main_quit, "WM destroy")
assistant.fullscreen()
def main(self):
gtk.gdk.threads_init()
reactor.run()
def main(args):
app = App()
app.main()
# lisp.py
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import hex
from builtins import str
from builtins import int
from builtins import range
from builtins import object
from past.utils import old_div
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import copy
import chacha
import poly1305
import geopy
import curve25519
from subprocess import getoutput
import queue
import distro
import pprint
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary array is indexed by device
# name and the second one has value lisp_interface() indexed by an
# instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs, as determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag indicates that when a map-cache
# entry is created or updated, we write specific information to, say, a
# Broadcom chip that will do VXLAN encapsulation. This is a way to get
# existing hardware to do L3 overlays with the LISP control-plane when all
# it supports is VXLAN. See lisp_program_vxlan_hardware().
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. The key
# is the hostname that is sent in an Info-Request; the value is a nested
# array. See lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests. When the process
# starts up or the map-cache is cleared by the user, we don't do
# rate-limiting for 1 minute so we can load up the cache quicker.
#
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store the last LISP_FLOW_LOG_SIZE (100) flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target they are requesting. The key is the EID-prefix in string
# format with bracketed instance-ID included, in slash format. The value of
# the dictionary array is a dictionary array of ITR addresses in string
# format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends is
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If a keyword is not in the dictionary array, the value is
# wildcarded. The eid-prefix, group-prefix and rloc-prefix values are
# lisp_address() instances so longest match lookups can be performed. The
# instance-id value is an array of 2 elements that stores the same value in
# both elements if not a range, or else the low and high range values.
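#
# A hypothetical example entry (illustration only):
#
# >>> lisp_glean_mappings = []
# >>> lisp_glean_mappings.append({"eid-prefix": eid, "rloc-prefix": rloc,
# ...     "instance-id": [1539, 1539]})
#
# where eid and rloc are lisp_address() instances and [1539, 1539] matches
# the single instance-ID 1539.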
#
lisp_glean_mappings = []
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining, in which case the RLE is removed from the (S,G) or
# (*,G) whose join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups
# 224.1.1.1 and 224.2.2.2, here is how timestamps 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
LISP_SEND_PUBSUB_ACTION = 6
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied",
"auth-failure", "send-subscribe"]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_TEST_MR_INTERVAL = 60 # In units of seconds, 1 minute
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = .5 # In units of seconds, 500 ms
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60 # In units of seconds, 1 minute
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # In units of seconds, 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (distro.linux_distribution()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(distro.linux_distribution()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(distro.linux_distribution()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(distro.linux_distribution()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(distro.linux_distribution()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(distro.linux_distribution()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this is a Linux system (e.g. Ubuntu or Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_is_python2
#
# Return True if this code is running Python 2.7.x.
#
def lisp_is_python2():
ver = sys.version.split()[0]
return(ver[0:3] == "2.7")
#enddef
#
# lisp_is_python3
#
# Return True if this code is running Python 3.x.x.
#
def lisp_is_python3():
ver = sys.version.split()[0]
return(ver[0:2] == "3.")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = getoutput("sudo dmidecode -s bios-vendor")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in a Google Compute Engine VM.
#
def lisp_on_gcp():
vm = getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and
# we need to create one, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or
# dino-macbook.wp.comcast.net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
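#
# For example:
#
#   lprint("received Map-Request")       # logged only if debug logging is on
#   lprint("parse error", "force")       # logged unconditionally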
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print("{}: {}:".format(ts, lisp_log_id), end=" ")
for arg in args:
if (arg == "force"): continue
print(arg, end=" ")
#endfor
print()
try: sys.stdout.flush()
except: pass
return
#enddef
#
# fprint
#
# Do an lprint() even when debug logging is off; the "force" flag is
# supplied so the message always prints.
#
def fprint(*args):
nargs = args + ("force",)
lprint(*nargs)
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# cprint
#
# Print the class instance.
#
def cprint(instance):
print("{}:".format(instance))
pprint.pprint(instance.__dict__)
#enddef
#
# debug
#
# Used for debugging. Helps find the location of temporary "printf" code so
# it can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print(red(">>>", False), end=" ")
print("{}:".format(ts), end=" ")
for arg in args: print(arg, end=" ")
print(red("<<<\n", False))
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_caller
#
# Print out calling stack.
#
def lisp_print_caller():
fprint(traceback.print_last())
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_min
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px;" ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as the source port in a Map-Request and
# the destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
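#
# For example:
#
#   >>> lisp_hex_string(0xdfdf)
#   'dfdf'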
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
lisp_uptime = lisp_get_timestamp()
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
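#
# For example, if ts was taken 90 seconds ago:
#
#   >>> lisp_print_elapsed(time.time() - 90)
#   '0:01:30'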
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
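#
# For example (address illustrative):
#
#   >>> lisp_convert_6to4("::ffff:10.1.1.1")
#   '10.1.1.1'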
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If the destination is IPv4 we
# need to put the address in an IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
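#
# For example (address illustrative):
#
#   >>> lisp_gethostbyname("10.1.1.1")
#   '10.1.1.1'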
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) == 4):
if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
ipv4[3].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
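#
# lisp_ip_checksum_demo
#
# A minimal sketch (not called anywhere; addresses illustrative) showing how
# lisp_ip_checksum() is used: build a 20-byte IPv4 header with the checksum
# field zeroed and let the function fill in bytes 10 and 11.
#
def lisp_ip_checksum_demo():
    tl = socket.htons(20)
    header = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 64, 17, 0)
    header += socket.inet_aton("10.1.1.1") + socket.inet_aton("10.1.1.2")
    return(lisp_ip_checksum(header))
#enddef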
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too Big message. It will
# statically checksum 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
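#
# lisp_udp_checksum_demo
#
# A minimal sketch (not called anywhere; addresses illustrative) showing how
# lisp_udp_checksum() is used: an 8-byte UDP header with a zeroed checksum
# field plus a 4-byte payload, checksummed over an IPv6 pseudo-header.
#
def lisp_udp_checksum_demo():
    port = socket.htons(LISP_CTRL_PORT)
    udplen = socket.htons(12)
    udp = struct.pack("HHHH", port, port, udplen, 0) + b"abcd"
    return(lisp_udp_checksum("2001:db8::1", "2001:db8::2", udp))
#enddef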
#
# lisp_igmp_checksum
#
# Compute the IGMP checksum. This is specialized for a 12-byte IGMP query
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (netifaces.AF_INET not in addresses): return(None)
#
# Return the first address found on the interface.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
p = lisp_format_packet(packet[0:12])
macs = p.replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = (sa in lisp_mymacs)
except: my_sa = False
if (da in lisp_mymacs): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
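#
# For example:
#
#   >>> lisp_is_mac_string("0050-56be-0001/48")
#   True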
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow only
# names with colons, dashes and alphanumeric characters.
#
d = device.replace(":", "")
d = device.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (netifaces.AF_LINK not in parms): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (mac not in lisp_mymacs): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
return(address)
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (netifaces.AF_INET in addresses):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (netifaces.AF_INET6 in addresses):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (netifaces.AF_INET in entry):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (netifaces.AF_INET6 in entry):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet(object):
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
        # Encrypt the packet. If something goes wrong, send the packet
        # unencrypted and tell the RLOC by setting key-id 0. For now, just
        # use key-id 1 since we support only a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (addr_str in lisp_crypto_keys_by_rloc_encap):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
        # Start with the UDP header. Call hash_packet() to set the source-port
        # value, unless we are doing lisp-crypto and NAT-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# Swap UDP port numbers and length field since they are 16-bit values.
#
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
udp_len = socket.htons(self.udp_length)
udp = struct.pack("HHHH", sport, dport, udp_len, self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = b""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = (old_div(length, 16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
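    #
    # Illustrative padding math for cipher_pad() (sizes made up): a 70-byte
    # packet pads to (70/16 + 1) * 16 = 80 bytes. ljust() pads with spaces;
    # the decapper truncates them using the inner packet-length field after
    # decryption.
    #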
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
        # Chacha produced ciphertext in unicode for py2. Convert to raw-
        # unicode-escape before proceeding, or else you cannot append it to
        # strings generated from different sources. Do this in do_icv() too.
#
ciphertext = ciphertext.encode("raw_unicode_escape")
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
        # Compute ICV and append to packet. The ICV covers the LISP header,
        # the IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
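    #
    # For reference, the encrypted payload returned above is laid out as:
    #
    #   [ IV | ciphertext (16-byte aligned) | ICV ]
    #
    # where the ICV is 16 bytes for poly1305 and 20 bytes for sha256-160,
    # and GCM inserts a 16-byte AEAD tag at the end of the ciphertext.
    #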
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
        # Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
            # This is the 4-tuple NAT case. There is another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
        # Call AES or chacha cipher. For AES-CBC, make sure the ciphertext is
        # a multiple of 16 bytes before decrypting.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
        # The packet is decrypted. Print details and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
        # Keep self.packet as the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix outer IPv4 header with fragment-offset values and add the
# IPv4 value.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
            offset += old_div(len(frag), 8)
#endfor
return(fragments)
#enddef
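    #
    # Illustrative sketch of fragment_outer() (sizes made up): a 2500-byte
    # inner payload with frag_len 1000 yields fragments of 1000, 1000, and
    # 500 bytes. The frag-offset field counts 8-byte units, so the three
    # headers carry 0x2000+0, 0x2000+125, and 250 (last fragment, MF clear).
    #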
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
        # Build IP header. Make the source of the ICMP-invoking packet the
        # destination and our address the source. Our address was set when we
        # thought we could encap, so lisp_packet.outer_source has the RLOC
        # address of this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
        # IP_HDRINCL requires the total-length and frag-offset fields to be in
        # host byte order. We need to build the total-length field just like
        # lisp_packet.encode() does, checksum, and then fix the outer header.
        # That logic is semantically replicated here. The same logic is in
        # lisp_packet.fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error as e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
        # The caller sends the packet on a raw socket. The kernel routes it
        # out the interface toward the destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
        return(True)
    #enddef
    def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = old_div(frag_field, 8)
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
            # Set the length of the fragment, set up the offset for the next
            # fragment-offset, and compute the header checksum. Then prepend
            # the inner header to the payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
            # header. If IPv6 outer header, the raw socket prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order, so they have to be byte-swapped here. But when
        # testing, we (UPC guys) discovered the frag field didn't need
        # swapping. The conclusion is that byte-swapping is necessary for
        # MacOS but not for Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3:4] + packet[2:3] + \
packet[4:6] + packet[7:8] + packet[6:7] + packet[8::]
else:
packet = packet[0:2] + packet[3:4] + packet[2:3] + packet[4::]
#endif
#endif
return(packet)
#enddef
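    #
    # For example (illustrative only): with an IPv4 outer header on MacOS,
    # the 16-bit total-length (bytes 2-3) and frag-offset (bytes 6-7) are
    # both swapped back to host order above; on Linux only total-length is.
    #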
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error as e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error as e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error as e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9:10])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
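    #
    # Note the fixed offsets above assume an IPv4 outer header without
    # options: protocol at byte 9, UDP ports at bytes 20-24.
    #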
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = self.lisp_header.get_instance_id()
if (is_lisp_packet):
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key, addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Chacha produced plaintext in unicode for py2. Convert to raw-
            # unicode-escape before proceeding. Do this in do_icv() too.
#
packet = packet.encode("raw_unicode_escape")
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
        # Slices work for both py2 strings and py3 bytes (indexing py3 bytes
        # returns ints, which would add rather than concatenate).
        return(mac[1:2] + mac[0:1] + mac[3:4] + mac[2:3] + mac[5:6] +
            mac[4:5])
#enddef
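    #
    # Illustrative example: MACs travel as three 16-bit words, so each byte
    # pair is swapped, e.g. b"\x00\x11\x22\x33\x44\x55" becomes
    # b"\x11\x00\x33\x22\x55\x44".
    #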
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9:10])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6:7])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
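    #
    # Illustrative example: the hash folds addresses and ports into 12 bits,
    # so encap source-ports always land in 0xf000-0xffff (61440-65535),
    # e.g. a hashval of 0x1234 gives udp_sport 0xf234.
    #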
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (iid not in lisp_iid_to_interface): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
    #enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9:10] if self.inner_version == 4 else packet[6:7]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
        #endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
    #enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header(object):
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
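#
# Minimal sketch (not executed, values made up) of building a data header
# with this class:
#
#   hdr = lisp_data_header()
#   hdr.nonce(lisp_get_data_nonce())   # sets N-bit plus 24-bit nonce
#   hdr.instance_id(1000)              # sets I-bit plus 24-bit IID
#   raw = hdr.encode()                 # 8 bytes in network byte order
#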
class lisp_echo_nonce(object):
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
        # If we are echoing, return the echo-nonce and exit echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
        #endif
#
        # Should we stop requesting nonce-echoing? Only do so if we received
        # an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
        # Start request-nonce mode and get a new nonce. If a request-nonce is
        # already stored, use the same nonce as last time, regardless of
        # whether we received an echo response. Setting the high-order bit
        # tells the caller to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
        # Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
        # If the last received echo was a while ago and a new request-nonce
        # was sent recently, say the echo happened so we can bootstrap a new
        # request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys(object):
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key.encode())
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
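    #
    # For reference (derived from the code above): get_iv() returns an
    # 8-byte IV for chacha, 12 bytes for AES-GCM, and 16 bytes for AES-CBC,
    # incrementing the stored counter on each call.
    #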
def key_length(self, key):
if (isinstance(key, int)): key = self.normalize_pub_key(key)
return(old_div(len(key), 2))
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
top = k[0:4].decode()
bot = k[-4::].decode()
return("0x{}...{}({})".format(top, bot, self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (isinstance(key, int)):
key = lisp_hex_string(key).zfill(256)
return(key)
#endif
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
        # This should only be used in a lab for debugging and never live,
        # since it's a security risk to expose the shared-key (even though
        # the entire key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != int): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != int): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context.encode(), data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
ek = lisp_hex_string(ek).zfill(32)
self.encrypt_key = ek.encode()
fill = 32 if self.do_poly else 40
ik = lisp_hex_string(ik).zfill(fill)
self.icv_key = ik.encode()
#enddef
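    #
    # Illustrative sketch of the KDF above, with local public key l and
    # remote public key r:
    #
    #   context      = "0001" + "lisp-crypto" + hex(l ^ r) + "0100"
    #   key-material = HMAC-SHA256(key=context, data=shared-key)
    #   encrypt-key  = high-order 128 bits, icv-key = low-order 128 bits
    #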
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
if (lisp_is_python2()):
hash_output = hexlify(hash_output.encode("raw_unicode_escape"))
else:
hash_output = hexlify(hash_output).decode()
#endif
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (nonce not in lisp_crypto_keys_by_nonce):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (nonce not in lisp_crypto_keys_by_nonce): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (addr_str not in by_rlocs):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate ECDH 25519
# cipher suite if library not installed on system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
        # is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
        # Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = b""
for i in range(0, len(key), 2):
byte = int(key[i:i+2], 16)
new_key += lisp_store_byte(byte)
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
#
# lisp_store_byte
#
# We have to store a byte differently in a py2 string versus a py3 byte
# string. Check whether the code is running under python2 or python3.
#
def lisp_store_byte_py2(byte):
return(chr(byte))
#enddef
def lisp_store_byte_py3(byte):
return(bytes([byte]))
#enddef
lisp_store_byte = lisp_store_byte_py2
if (lisp_is_python3()): lisp_store_byte = lisp_store_byte_py3
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread(object):
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header(object):
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
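#
# Sketch (not executed) of decoding a fixed header with this class:
#
#   hdr = lisp_control_header()
#   if (hdr.decode(packet)):
#       if (hdr.type == LISP_MAP_REQUEST and hdr.is_rloc_probe()):
#           ... # handle RLOC-probe Map-Request
#       #endif
#   #endif
#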
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is 1 of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
#
class lisp_map_register(object):
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
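        # ("QHH" and the "QBBH" header packed in encode() are both 12
        # bytes, so this offset lands exactly on the authentication data.)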
auth_data = b""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce = byte_swap_64(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = b""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
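#
# A minimal illustrative sketch of the 24-byte xTR-ID/site-ID trailer
# appended by lisp_map_register.encode_xtr_id() above: the 128-bit xTR-ID
# is split into two 64-bit halves and each half is put in network byte
# order with byte_swap_64() (defined earlier in this file).
#
def lisp_sketch_xtr_id_trailer(xtr_id, site_id):
    upper = byte_swap_64(xtr_id >> 64)
    lower = byte_swap_64(xtr_id & 0xffffffffffffffff)
    return(struct.pack("QQQ", upper, lower, byte_swap_64(site_id)))
#enddef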
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
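#
# A minimal illustrative sketch of the authentication step in
# lisp_map_notify.encode() below. The real code calls lisp_hash_me();
# using HMAC keyed with the shared password, over a packet whose auth
# field has been zeroed, is an assumption here for demonstration.
#
def lisp_sketch_notify_auth(packet_with_zeroed_auth, password, sha1):
    import hashlib
    import hmac
    alg = hashlib.sha1 if sha1 else hashlib.sha256
    return(hmac.new(password.encode(), packet_with_zeroed_auth,
        alg).digest())
#enddef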
class lisp_map_notify(object):
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
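#
# A minimal illustrative sketch of the sign/verify round-trip implemented
# by sign_map_request()/verify_map_request_sig() below. The signed data
# is the hex nonce concatenated with the printed source and target EIDs;
# generating a fresh key-pair here is an assumption for demonstration.
#
def lisp_sketch_map_request_sig(nonce_hex, source_eid_str, target_eid_str):
    key = ecdsa.SigningKey.generate()
    sig_data = (nonce_hex + source_eid_str + target_eid_str).encode()
    signature = key.sign(sig_data)
    return(key.get_verifying_key().verify(signature, sig_data))
#enddef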
class lisp_map_request(object):
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
self.json_telemetry = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
if (itr.afi == LISP_AFI_LCAF and self.json_telemetry != None):
continue
#endif
itr_str = red(itr.print_address_no_iid(), False)
lprint(" itr-rloc: afi {} {}{}".format(itr.afi, itr_str,
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
if (self.json_telemetry != None):
lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
self.json_telemetry))
#endif
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data.encode())
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig.decode() }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
sig_data = sig_data.encode()
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode_json(self, json_string):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 4)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0, lcaf_len,
json_len)
packet += json_string.encode()
packet += struct.pack("H", 0)
return(packet)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
if (telemetry != None): self.itr_rloc_count += 1
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
packet += self.encode_json(json_string)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (addr_str in lisp_crypto_keys_by_rloc_encap):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
#
# Add telemetry, if configured and this is an RLOC-probe Map-Request.
#
if (telemetry != None):
ts = str(time.time())
telemetry = lisp_encode_telemetry(telemetry, io=ts)
self.json_telemetry = telemetry
packet += self.encode_json(telemetry)
#endif
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 4): return(None)
#
# Pull out JSON string from packet.
#
json_string = packet[0:json_len]
packet = packet[json_len::]
#
        # If telemetry data is in the JSON, we do not need to convert it to
        # a dict array.
#
if (lisp_is_json_telemetry(json_string) != None):
self.json_telemetry = json_string
#endif
#
        # Get the AFI that follows the JSON string; we are expecting an
        # AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
if (self.json_telemetry != None): return(packet)
#
# Convert string to dictionary array.
#
try:
json_string = json.loads(json_string)
except:
return(None)
#endtry
#
# Store JSON data internally.
#
if ("source-eid" not in json_string): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if ("signature-eid" not in json_string): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if ("signature" not in json_string): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f)
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
itr_rloc_count = self.itr_rloc_count + 1
while (itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = afi
#
# We may have telemetry in the ITR-RLOCs. Check here to avoid
# security key material logic.
#
if (itr.afi == LISP_AFI_LCAF):
orig_packet = packet
json_packet = packet[format_size::]
packet = self.lcaf_decode_json(json_packet)
if (packet == None): return(None)
if (packet == json_packet): packet = orig_packet
#endif
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (addr_str in rloc_keys): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
elif (self.json_telemetry == None):
#
# Decode key material if we found no telemetry data.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (addr_str in lisp_crypto_keys_by_rloc_decap):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
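#
# A minimal illustrative sketch: packing and re-parsing the first 32-bit
# word of a Map-Reply, mirroring lisp_map_reply.encode()/decode() below.
# LISP_MAP_REPLY is the Type=2 value shown in the diagram above.
#
def lisp_sketch_map_reply_first_long(hop_count, record_count, rloc_probe):
    first_long = (LISP_MAP_REPLY << 28) | ((hop_count & 0xff) << 8) | \
        (record_count & 0xff)
    if (rloc_probe): first_long |= 0x08000000
    wire = struct.pack("I", socket.htonl(first_long))
    parsed = socket.ntohl(struct.unpack("I", wire)[0])
    return([(parsed >> 8) & 0xff, parsed & 0xff])
#enddef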
class lisp_map_reply(object):
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (self.nonce in lisp_crypto_keys_by_nonce):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator records is encrypted with the
# chacha cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
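#
# A minimal illustrative sketch of the 12-byte fixed EID-record header
# packed by lisp_eid_record.encode() below; the ACT field, authoritative
# bit, and ddt-incomplete bit share one 16-bit field. AFI 1 (IPv4) is
# assumed here for demonstration.
#
def lisp_sketch_eid_record_header(ttl, rloc_count, mask_len, action,
    authoritative, ddt_incomplete):
    act = (action & 0x7) << 13
    if (authoritative): act |= 0x1000
    if (ddt_incomplete): act |= 0x800
    return(struct.pack("IBBHHH", socket.htonl(ttl), rloc_count, mask_len,
        socket.htons(act), 0, socket.htons(1)))
#enddef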
class lisp_eid_record(object):
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
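#
# A minimal illustrative sketch of the length bookkeeping done in
# lisp_ecm.encode() below: the inner UDP length covers the control
# message plus the 8-byte UDP header, the inner IPv4 total-length adds a
# 20-byte IP header, and the IPv6 payload-length excludes the IPv6
# header, so it equals the UDP length.
#
def lisp_sketch_ecm_lengths(control_message_len, ipv4):
    udp_length = control_message_len + 8
    ip_length = (udp_length + 20) if ipv4 else udp_length
    return([udp_length, ip_length])
#enddef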
class lisp_ecm(object):
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
self.udp_dport))
#enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# JSON Data Model Type Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 14 | kid | Rvd2|E|B| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON length | JSON binary/text encoding ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Optional Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When the E-bit is set to 1, 'kid' is a key-id indicating that the value
# fields in the JSON string are encrypted with the encryption key
# associated with that key-id.
#
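#
# A minimal illustrative sketch of the kid/E-bit byte packed by
# lisp_rloc_record.encode_json() below: the 3-bit key-id sits in the top
# bits and the E-bit (0x02) marks the JSON value fields as encrypted.
#
def lisp_sketch_json_lcaf_kid_byte(key_id, encrypted):
    return(((key_id & 0x7) << 5) | (0x02 if encrypted else 0x00))
#enddef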
class lisp_rloc_record(object):
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and name in lisp_geo_list):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and name in lisp_elp_list):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and name in lisp_rle_list):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and name in lisp_json_list):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_json(self, lisp_json):
json_string = lisp_json.json_string
kid = 0
if (lisp_json.json_encrypted):
kid = (lisp_json.json_key_id << 5) | 0x02
#endif
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
addr_len = self.rloc.addr_length() + 2
lcaf_len = socket.htons(len(json_string) + addr_len)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
lcaf_len, json_len)
packet += json_string.encode()
#
# If telemetry, store RLOC address in LCAF.
#
if (lisp_is_json_telemetry(json_string)):
packet += struct.pack("H", socket.htons(self.rloc.afi))
packet += self.rloc.pack_address()
else:
packet += struct.pack("H", 0)
#endif
return(packet)
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = b""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = b""
if (self.elp):
elp_recs = b""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = b""
if (self.rle):
rle_recs = b""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += (rle_node.rloc_name + "\0").encode()
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = b""
if (self.json):
jpkt = self.encode_json(self.json)
#endif
spkt = b""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = b""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += (self.rloc_name + "\0").encode()
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
try:
packet = packet[0:-2] + self.encode_lcaf()
except:
lprint("Could not encode LCAF for RLOC-record")
#endtry
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce, ms_json_encrypt):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
encrypted_json = rsvd2 & 0x02
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len], encrypted_json,
ms_json_encrypt)
packet = packet[json_len::]
#
# If telemetry, store RLOC address in LCAF.
#
afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
packet = packet[2::]
if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
#endif
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_key() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce, ms_json_encrypt=False):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None, False)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral(object):
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry(object):
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node(object):
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request(object):
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (str(self.nonce) in lisp_ddt_map_requestQ):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information follows the EID-prefix, which has
# EID-prefix-AFI set to 0. The EID appended below is either a hostname
# (AFI=17) or AFI=0:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | <hostname--null-terminated> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info(object):
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
# Zero out key-id, auth-data-len, ttl, reserved, eid-mask-len, and
# eid-prefix-afi.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += (self.hostname + "\0").encode()
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
class lisp_nat_info(object):
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source(object):
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
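#
# Illustrative usage (not in the original source; the hostname and address
# values are hypothetical). An Info-Request source is cached twice, keyed
# by "<address><hostname>" for address lookups and by nonce for matching
# Info-Replies:
#
#   info_source = lisp_info_source("etr-1", "10.0.0.2", 4341)
#   info_source.cache_address_for_info_source()    # key "10.0.0.2etr-1"
#   info_source.cache_nonce_for_info_source(0xabcd)
#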
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it on little-endian (x86) hosts, and convert
# it to an ASCII hex string, zero-filling longwords that lead with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
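#
# A hedged example: for SHA-1-96 the three zero-filled pieces concatenate
# to 16 + 16 + 8 = 40 hex digits (160 bits of authentication data); for
# SHA-256-128 the four pieces concatenate to 4 x 16 = 64 hex digits (256
# bits). Assuming lisp_hex_string(0x1f) returns "1f":
#
#   "1f".zfill(16) -> "000000000000001f"
#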
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
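#
# Usage sketch (illustrative, not from the original source). A digit-string
# port selects a network socket; anything else is treated as an AF_UNIX
# pathname:
#
#   network_socket = lisp_open_listen_socket("0.0.0.0", "4342")
#   internal_socket = lisp_open_listen_socket("", "lisp-itr")
#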
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
# Returns a byte string.
#
def lisp_packet_ipc(packet, source, sport):
header = "packet@{}@{}@{}@".format(str(len(packet)), source, str(sport))
return(header.encode() + packet)
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
# Returns a byte string.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
header = "control-packet@{}@{}@".format(dest, str(dport))
return(header.encode() + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
# Returns a byte string.
#
def lisp_data_packet_ipc(packet, source):
header = "data-packet@{}@{}@@".format(str(len(packet)), source)
return(header.encode() + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
# Returns a byte string. Variable "ipc" is a string.
#
def lisp_command_ipc(ipc, source):
packet = "command@{}@{}@@".format(len(ipc), source) + ipc
return(packet.encode())
#enddef
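#
# Illustrative example (not part of the original source) of the IPC framing
# built above. Note the double "@" that encodes a null port for command
# messages:
#
#   lisp_command_ipc("show map-cache", "lisp-itr")
#   -> b"command@14@lisp-itr@@show map-cache"
#
#   lisp_packet_ipc(packet, "lisp-core", 4342)
#   -> b"packet@<len(packet)>@lisp-core@4342@" + packet
#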
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
# Returns a byte string. Variable "data" is a string.
#
def lisp_api_ipc(source, data):
packet = "api@" + str(len(data)) + "@" + source + "@@" + data
return(packet.encode())
#enddef
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 bytes, or 9000 for control-packets) since the
# socket interface will not support larger datagrams, and socket.setsockopt()
# won't allow us to increase SO_SNDBUF.
#
# Variable "packet" is of type byte string. Caller must adhere. Since packet
# is going out a socket interface (even if internal).
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find(b"control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
if (type(segment) == str): segment = segment.encode()
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error as e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
#
# lisp_format_packet
#
# Put whitespace between every 4 bytes (8 hex digits) of a packet dump.
# Returns a string, not a byte string like the supplied "packet".
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = b""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + b" "
offset += 8
length -= 4
#endwhile
return(new.decode())
#enddef
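#
# Example (illustrative, not in the original source):
#
#   lisp_format_packet(b"\x12\x34\x56\x78\x9a\xbc") -> "12345678 9abc "
#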
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0].
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe, set the TTL for the outgoing packet to
# LISP_RLOC_PROBE_TTL.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 128)
if (set_ttl):
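#
# First byte 0x12 is a Map-Request (type 1) with the probe-bit set and
# 0x28 is a Map-Reply (type 2) with the probe-bit set.
#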
lisp_type = struct.unpack("B", packet[0:1])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error as e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Reassemble segments when the received IPC message is larger than what a
# single socket read can return.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length, we only have one
# segment, which is the packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us; the last
# partial packet must be dropped.
#
seg = segment.decode()
if (seg.find("packet@") == 0):
seg = seg.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, re-insert the 0x40 ("@") byte that split()
# removed between elements. This is a bit-stuffing procedure. The caller
# passes the payload portion of the split message as a list of byte strings.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = b""
for segment in payload: packet += segment + b"\x40"
return(packet[:-1])
#enddef
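#
# Example (illustrative, not in the original source): payload data that
# contained a literal "@" was split into two segments; bit-stuffing rejoins
# them:
#
#   lisp_bit_stuff([b"\x01\x02", b"\x03"]) -> b"\x01\x02\x40\x03"
#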
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <port> @ <command-buffer>
# "packet" @ <total-length> @ <source> @ <port> @ <packet-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did the fragmenting, so it is reassembled into a complete
# datagram on this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
if (type(data) == str): data = data.encode()
loop = False
while (assembled == False):
data = data.split(b"@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0].decode()
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2].decode()
port = data[3].decode()
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we could confuse the IPC separator with real data.
# So go to the payload and put in 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(b" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command.decode() + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
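#
# Usage sketch (illustrative, not from the original source; the socket
# variable and process_command() are placeholder names). Callers unpack the
# returned 4-element list:
#
#   opcode, source, port, packet = lisp_receive(internal_socket, True)
#   if (opcode == "command"): process_command(packet)
#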
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
timestamp = time.time()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl, timestamp)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
elif (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
elif (lisp_is_running("lisp-itr")):
lisp_process_unicast_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl, timestamp):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
keys, enc, auth, mr_ttl=-1):
rloc_probe = map_request.rloc_probe if (map_request != None) else False
json_telemetry = map_request.json_telemetry if (map_request != None) else \
None
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
if (json_telemetry != None): eid_record.rloc_count += 1
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
probing_rloc = None
for rloc_entry in rloc_set:
multicast = rloc_entry.rloc.is_multicast_address()
rloc_record = lisp_rloc_record()
probe_bit = rloc_probe and (multicast or json_telemetry == None)
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs or multicast):
rloc_record.local_bit = True
rloc_record.probe_bit = probe_bit
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
if (probing_rloc == None): probing_rloc = rloc_entry.rloc
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
#
# Add etr-out-ts if telemetry data was present in Map-Request.
#
if (json_telemetry != None):
rloc_record = lisp_rloc_record()
if (probing_rloc): rloc_record.rloc.copy_address(probing_rloc)
rloc_record.local_bit = True
rloc_record.probe_bit = True
rloc_record.reach_bit = True
if (lisp_i_am_rtr):
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
#endif
js = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
rloc_record.json = lisp_json("telemetry", js)
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endif
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if (public and rtr in lisp_rtr_list or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp in building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, map_request, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose to identify the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
# This can also be true for IPv6 RLOCs. So include them.
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_ipv6()): rloc_set.append(rloc_entry)
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
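#
# Worked example (illustrative, not in the original source). Given a
# registered RLOC-set of {RTR priority-254, private priority-1, public
# priority-1}:
#
#   - a requesting RTR gets the non-254 records (private + public) so it
#     can replicate directly to the ETRs, and
#   - a requesting ITR/PITR gets the private (and IPv6) RLOCs plus the
#     RTR-list, so it can try the private path first and fall back to
#     encapsulating to the RTR.
#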
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return(pubsub)
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription request is being requested, return a Map-Notify so it knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
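#
# Byte-level sketch (illustrative, not in the original source) of the
# conversion above:
#
#   Map-Reply:  [first-long: type=2|count] [8-byte nonce] [EID-records ...]
#   Map-Notify: [first-long: type=4|count] [8-byte nonce]
#               [4 zero bytes: Key-ID/Alg-ID/Auth-Length] [EID-records ...]
#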
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, rloc_records,
registered_eid, site):
for peid in lisp_pubsub_cache:
for pubsub in list(lisp_pubsub_cache[peid].values()):
e = pubsub.eid_prefix
if (e.is_more_specific(registered_eid) == False): continue
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(peid, False), nonce))
#
# Do not use memory from EID-record of Map-Register since we are
# over-writing EID for Map-Notify message.
#
pubsub_record = copy.deepcopy(eid_record)
pubsub_record.eid.copy_address(e)
pubsub_record = pubsub_record.encode() + rloc_records
lisp_build_map_notify(lisp_sockets, pubsub_record, [peid], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
pubsub = lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl,
xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
pubsub.map_notify_count += 1
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in an IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. An ITR usually doesn't
# send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If the request is for a non-crypto-EID, signatures are configured to be
# required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and site_eid.policy in lisp_policies):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the ttl should be, and whether we should drop the
# Map-Request and return a negative Map-Reply?
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell a spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# When replying to a subscribe-request, return the target EID and not the
# possibly shorter registered EID-prefix that matched.
#
if (pubsub):
reply_eid = eid
reply_group = group
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, map_request, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs").format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to an ETR at the registered site. We have to wrap it in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
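#
# Illustrative sketch of the force-ttl encoding used above: the EID-record
# TTL field is in minutes unless the high-order bit is set, in which case
# the low 31 bits are taken as seconds. A standalone round-trip; the decode
# side is inferred from this encoding, not taken from this file.
#
def example_encode_ttl_seconds(seconds):
    return(seconds | 0x80000000)
#enddef

def example_decode_ttl_to_seconds(ttl):
    if (ttl & 0x80000000): return(ttl & 0x7fffffff)
    return(ttl * 60)
#enddef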
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# We matched an auth-prefix; that means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different, then
# count how many bits from the left that bit position is. That is the new
# mask-length. Compare it to the neg-prefix mask-length we have found so
# far; if the new one is longer than the stored one, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
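#
# Illustrative worked example of the XOR technique above, standalone and at
# IPv4 width for brevity: 10.1.0.0 and 10.1.128.0 share their first 16
# bits, so the loop breaks at mask-length 16.
#
def example_first_diff_mask_len(addr1, addr2, address_size=32):
    diff_address = addr1 ^ addr2
    for mask_len in range(address_size):
        if (diff_address & (1 << (address_size - mask_len - 1))):
            return(mask_len)
        #endif
    #endfor
    return(address_size)
#enddef

# example_first_diff_mask_len(0x0a010000, 0x0a018000) returns 16.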
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that does NOT match any configured
# site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix that
# is no shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for the auth-prefix in the DDT cache. If not found, we return the
# host-based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that does NOT match any configured
# site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until we have
# lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group= group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver retransmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and last_node in ref.referral_set):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# response has been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in list(referral.referral_set.values()):
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
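#
# Illustrative sketch of the selection policy above on plain data: keep the
# up nodes of the numerically lowest priority, then index by hash modulo so
# a given (source, dest) EID pair is sticky to one node. The (updown,
# priority) tuples are illustrative stand-ins for lisp_referral_node().
#
def example_select_referral(nodes, hashval):
    best = []
    for node in nodes:
        updown, priority = node
        if (updown == False): continue
        if (len(best) == 0 or best[0][1] == priority):
            best.append(node)
        elif (best[0][1] > priority):
            best = [node]
        #endif
    #endfor
    if (len(best) == 0): return(None)
    return(best[hashval % len(best)])
#enddef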
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# The caller may want us to use the root versus a best-match lookup. We
# only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Do Map-Resolver processing of a Map-Request received from an ITR. We need
# to forward the Map-Request to the longest matched referral from the
# referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl, timestamp):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
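#
# Illustrative sketch of the arithmetic implied by the counters above: a
# rough recent-average RTT is total_rtt over the replies accumulated since
# the last reset (the sum restarts every 100 replies). Only nonce-matched
# replies add to total_rtt, so the figure understates the true average;
# how the value is reported elsewhere in this file is not shown here.
#
def example_average_rtt(total_rtt, neg_map_replies_received):
    window = neg_map_replies_received % 100
    if (window == 0): return(0.0)
    return(total_rtt / window)
#enddef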
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If this is a negative Map-Reply, see if it came from a Map-Resolver and
# do some counting and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record,
# like ones from RTRs or PETRs.
#
if (multicast == False and eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
mrloc = None
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Add the itr-in timestamp if telemetry data is included in the RLOC
# record.
#
if (rloc.json):
if (lisp_is_json_telemetry(rloc.json.json_string)):
js = rloc.json.json_string
js = lisp_encode_telemetry(js, ii=itr_in_ts)
rloc.json.json_string = js
#endif
#endif
#
# Process state for an RLOC-probe reply from this specific RLOC and
# update RLOC state for the map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc, source, port,
map_reply, ttl, mrloc)
#endif
if (rloc.rloc.is_multicast_address()): mrloc = rloc
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private-address RLOC unreachable for the two latter cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put RTR RLOCs in its map-cache, but xTRs do.
# Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create an HMAC hash from the packet contents stored in lisp_map_register()
# and encode it in the packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password.encode(), packet, hashalg).hexdigest()
else:
hashval = hmac.new(password.encode(), packet, hashalg).digest()
#endif
return(hashval)
#enddef
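#
# Illustrative usage sketch for lisp_hash_me(): the same standard-library
# HMAC calls the function above wraps, shown for sha256. The password and
# packet buffer are illustrative.
#
def example_hmac_sha256(password, packet):
    import hashlib
    import hmac

    return(hmac.new(password.encode(), packet, hashlib.sha256).hexdigest())
#enddef

# example_hmac_sha256("secret", b"map-register-bytes") returns a hex string
# comparable against the authentication data carried in the packet.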
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print both hashes if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
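#
# Illustrative sketch of the retransmit pattern used by the Map-Notify code
# above, reduced to plain data: check the retry limit, count the attempt,
# send, and restart a threading.Timer. The names state, send_fn, retries,
# and interval are illustrative, not from the original.
#
def example_retransmit(state, send_fn, retries, interval):
    import threading

    if (state["retry_count"] == retries): return
    state["retry_count"] += 1
    send_fn()
    state["timer"] = threading.Timer(interval, example_retransmit,
        [state, send_fn, retries, interval])
    state["timer"].start()
#enddef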
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
rloc_record.local_bit = True
rloc_record.probe_bit = False
rloc_record.reach_bit = True
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Set up a retransmission queue entry and send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# re-authenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.local_bit = True
rloc_record.probe_bit = False
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function looks for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# The (S,G) RLOC-set could be empty when the last RLE goes away. We will
# have to search all individual registrations looking for RTRs.
#
# We store them in a dictionary so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in list(sg_site_eid.individual_registrations.values()):
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = list(temp_set.values())
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of to the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if ("signature" not in json_sig): continue
return(rloc_record)
#endfor
return(None)
#enddef
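#
# Illustrative sketch of the JSON shape the loop above searches for:
# "signature" is the key tested here, and "signature-eid" is consumed
# later by lisp_verify_cga_sig(). The field values are illustrative and a
# deployment may carry additional keys.
#
def example_signature_json(sig_eid_str, b64_signature):
    import json

    return(json.dumps({"signature-eid" : sig_eid_str,
        "signature" : b64_signature}))
#enddef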
#
# lisp_get_eid_hash
#
# From an EID, return the EID-hash value. Below is an example EID with the
# EID-hash it yields for two hash lengths:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string of colon-separated hex digits covering the hash length
# in bits. Returns None if the IPv6 EID is not a crypto-hash address; such
# addresses are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, old_div(hash_mask_len, 16)):
addr = address & 0xffff
addr = hex(addr)[2::]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2::]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
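#
# Illustrative standalone sketch of the extraction above, taking the EID
# as a plain integer. For the header-comment example EID and a
# hash_mask_len of 80 it returns "6dbd:3799:48e1:c6a2:9430".
#
def example_eid_hash(address, hash_mask_len):
    eid_hash = ""
    for i in range(hash_mask_len // 16):
        eid_hash = hex(address & 0xffff)[2::].zfill(4) + ":" + eid_hash
        address >>= 16
    #endfor
    if (hash_mask_len % 16 != 0):
        eid_hash = hex(address & 0xff)[2::].zfill(2) + ":" + eid_hash
    #endif
    return(eid_hash[0:-1])
#enddef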
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Returns [hash_eid, pubkey, True/False]. The first two values can be None;
# the last boolean indicates whether the hash lookup succeeded.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if ("public-key" not in json_pubkey): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use the signature-eid if it is in the JSON string. Otherwise, the
# crypto-EID is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif ("signature-eid" in sig):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Look up the CGA hash in the mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. A Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note that to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data.encode(), hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
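#
# Illustrative sketch of the matching signer for the verification above,
# assuming the same ecdsa package: a NIST256p key (required for the sha256
# hashfunc noted above) signing the "[<iid>]<eid>" string, base64-encoded
# for carriage in the RLOC-record JSON. Key generation here is
# illustrative; a real signer would load its own key.
#
def example_sign_sig_eid(sig_data):
    import binascii
    import hashlib
    import ecdsa

    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    sig = key.sign(sig_data.encode(), hashfunc=hashlib.sha256)
    return(binascii.b2a_base64(sig).decode())
#enddef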
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the Map-Notify nonce
# and the xTR address.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine, from the supplied EID-list, whether any EID is in the EID-list
# of a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and
# return a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse the first 4 bytes, which are not encrypted. If the packet is not
# encrypted, return it to the caller. If it is encrypted, get the 3-bit
# key-id next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use a 16-byte key, which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
#
# Use 20 rounds so we can interoperate with ct-lisp mobile platforms.
#
plaintext = chacha.ChaCha(ekey, iv, 20).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
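#
# Illustrative sketch of the matching encrypt side, assuming the same
# chacha module this file already uses and that its ChaCha(key, iv,
# rounds) object exposes encrypt() as the mirror of the decrypt() call
# above (for a stream cipher both apply the same keystream XOR): the key
# padded to 32 characters, an 8-character zero IV, 20 rounds, and the
# first 4 header bytes (carrying the e-bit and key-id) left in the clear.
#
def example_encrypt_map_register(packet, ekey):
    ekey = ekey.zfill(32)
    iv = "0" * 8
    return(packet[0:4] + chacha.ChaCha(ekey, iv, 20).encrypt(packet[4::]))
#enddef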
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
# will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allow overlapping ams-registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (key_id in site.auth_key):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get the individual site-eid. If not,
        # and what was cached had the merge bit set, set a flag to issue an
        # error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = map_register.xtr_id
if (key in site_eid.individual_registrations):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
site_eid.encrypt_json = parent.encrypt_json
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister the entry if the source of the Map-Register
        # is in the list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (addr_str not in site.allowed_rlocs):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
            # RLOC validated good. Otherwise, go to the next EID record.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
peid_record = copy.deepcopy(eid_record)
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, peid_record, start_rloc_records,
site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
    # The merged Map-Notify will serve as a Map-Register ack, so we don't
    # need to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
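#
# example_walk_eid_records
#
# A minimal sketch (not called anywhere) of the record-walking pattern
# used above: each EID-record is followed by its RLOC-set, so skipping a
# record means hopping over the RLOC-records with end_of_rlocs() to land
# on the next EID-record. Per-record processing is elided.
#
def example_walk_eid_records(packet, record_count):
    for i in range(record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None): return(None)
        rloc_record = lisp_rloc_record()
        packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
        if (packet == None): return(None)
    #endfor
    return(packet)
#enddef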
#
# lisp_process_unicast_map_notify
#
# Have ITR process a Map-Notify as a result of sending a subscribe-request.
# Update map-cache entry with new RLOC-set.
#
def lisp_process_unicast_map_notify(lisp_sockets, packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
        # If no map-cache entry exists, or the entry does not have action
        # LISP_SEND_PUBSUB_ACTION, ignore.
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.eid)
if (mc == None):
e = green(eid_str, False)
lprint("Ignoring Map-Notify EID {}, no subscribe-request entry". \
format(e))
continue
#endif
#
# Check if map-cache entry is configured subscribe-request entry.
# Otherwise, it is an entry created from the subscribe-request entry
# from a returned Map-Notify.
#
if (mc.action != LISP_SEND_PUBSUB_ACTION):
if (mc.subscribed_eid == None):
e = green(eid_str, False)
lprint("Ignoring Map-Notify for non-subscribed EID {}". \
format(e))
continue
#endif
#endif
#
# Check if this is the map-cache entry for the EID or the SEND_PUBSUB
# configured map-cache entry. Reuse the memory if the EID entry exists
        # and empty the RLOC-set since we will rebuild it.
#
old_rloc_set = []
if (mc.action == LISP_SEND_PUBSUB_ACTION):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
subscribed_eid = copy.deepcopy(eid_record.eid)
subscribed_group = copy.deepcopy(eid_record.group)
else:
subscribed_eid = mc.subscribed_eid
subscribed_group = mc.subscribed_group
old_rloc_set = mc.rloc_set
mc.delete_rlocs_from_rloc_probe_list()
mc.rloc_set = []
#endif
#
# Store some data from the EID-record of the Map-Notify.
#
mc.mapping_source = None if source == "lisp-itr" else source
mc.map_cache_ttl = eid_record.store_ttl()
mc.subscribed_eid = subscribed_eid
mc.subscribed_group = subscribed_group
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(old_rloc_set) != 0 and eid_record.rloc_count == 0):
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(eid_str, False)))
continue
#endif
#
        # Now add all RLOCs to a new RLOC-set. If the RLOC existed in the
        # old set, copy the old RLOC data. We want to retain uptimes, stats,
        # and RLOC-probe data in the new entry with the same RLOC address.
#
new = replaced = 0
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
#
# See if this RLOC address is in old RLOC-set, if so, do copy.
#
found = False
for r in old_rloc_set:
if (r.rloc.is_exact_match(rloc_record.rloc)):
found = True
break
#endif
#endfor
if (found):
rloc = copy.deepcopy(r)
replaced += 1
else:
rloc = lisp_rloc()
new += 1
#endif
#
# Move data from RLOC-record of Map-Notify to RLOC entry.
#
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
mc.rloc_set.append(rloc)
#endfor
lprint("Update {} map-cache entry with {}/{} new/replaced RLOCs".\
format(green(eid_str, False), new, replaced))
#
# Build best RLOC-set and write to external data-plane, if any.
#
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
#endfor
#
# Find map-server data structure from source address of Map-Notify then
# send Map-Notify-Ack to it.
#
ms = lisp_get_map_server(source)
if (ms == None):
lprint("Cannot find Map-Server for Map-Notify source address {}".\
format(source.print_address_no_iid()))
return
#endif
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
#enddef
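#
# example_copy_forward_rloc
#
# A minimal sketch (not called anywhere) of the copy-forward pattern
# above: when an RLOC address appears in both the old and new RLOC-sets,
# deep-copy the old entry so uptimes, stats, and RLOC-probe state survive
# the replacement; otherwise start with a fresh lisp_rloc().
#
def example_copy_forward_rloc(old_rloc_set, rloc_record):
    for r in old_rloc_set:
        if (r.rloc.is_exact_match(rloc_record.rloc)):
            return(copy.deepcopy(r))
        #endif
    #endfor
    return(lisp_rloc())
#enddef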
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
        # Gleaned map-cache entries always override what is registered in
        # the mapping system, since the mapping-system RLE entries are RTRs
        # and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get map-server so we can do statistics and find the auth-key, if an
    # auth-key was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
    # Send out Map-Notify-Ack. Skip over the header so
    # lisp_send_map_notify_ack() starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
    # If this is a Map-Notify for an (S,G) entry, send the message to the
    # lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably an RLE change that the ITR needs to
    # know about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (key_id in site.auth_key):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arriving Map-Referral EID-prefix is more-specific than
# the last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
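#
# example_referral_progress
#
# A minimal sketch (not called anywhere) of the forward-progress rule
# behind the loop check above: a referral only makes progress when the
# previously cached prefix is not more-specific than the newly arrived
# one. Both arguments are lisp_address instances.
#
def example_referral_progress(cached_prefix, new_prefix):
    if (cached_prefix == None): return(True)
    return(cached_prefix.is_more_specific(new_prefix) == False)
#enddef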
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in Map-Reply message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (s in referral.referral_set):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
        # Set the dirty-bit so we can remove referral-nodes from the cached
        # entry that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (addr_str not in referral.referral_set):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (addr_str in dirty_set): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (s in referral.referral_set):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
timestamp = time.time()
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1, timestamp)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
    # If we are doing LISP-Decent and have a multicast group configured as
    # a Map-Server, we can't bootstrap our membership by sending to the
    # group itself, so we send to the loopback address instead. We join
    # through one other member of the peer-group to get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
    # Should we encrypt the Map-Register? Use a 16-byte key, which is 32
    # string characters. Use 20 rounds so the decrypter can interoperate
    # with ct-lisp mobile platforms.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv, 20).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
# For sending Map-Requests, if the NAT-traversal configured, use same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
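#
# example_build_ecm
#
# A minimal sketch (not called anywhere) of the encapsulation step above:
# build a lisp_ecm for the inner UDP source port, encode the ECM headers
# for the inner addresses, and prepend them to the control message.
# Addresses are lisp_address instances; error handling is elided.
#
def example_build_ecm(packet, inner_source, inner_sport, inner_dest):
    ecm = lisp_ecm(inner_sport)
    ecm_packet = ecm.encode(packet, inner_source, inner_dest)
    if (ecm_packet == None): return(None)
    return(ecm_packet + packet)
#enddef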
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
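#
# example_byte_swap_64
#
# Quick illustration (not called anywhere): byte_swap_64() reverses the
# 8 bytes of a 64-bit value, and applying it twice returns the original.
#
def example_byte_swap_64():
    value = 0x0102030405060708
    swapped = byte_swap_64(value)
    assert(swapped == 0x0807060504030201)
    assert(byte_swap_64(swapped) == value)
#enddef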
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries(object):
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache(object):
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (ml not in self.cache):
self.cache[ml] = lisp_cache_entries()
self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
#endif
if (key not in self.cache[ml].entries):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (ml_key not in self.cache): return(None)
if (key not in self.cache[ml_key].entries): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry in list(self.cache[ml].entries.values()):
if (prefix.is_more_specific(entry.eid)):
if (found == None or
entry.eid.is_more_specific(found.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (ml not in self.cache): return
if (key not in self.cache[ml].entries): return
self.cache[ml].entries.pop(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for entry in list(self.cache[ml].entries.values()):
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def sort_in_entry(self, table, value):
if (table == []): return([value])
t = table
while (True):
if (len(t) == 1):
if (value == t[0]): return(table)
index = table.index(t[0])
if (value < t[0]):
return(table[0:index] + [value] + table[index::])
#endif
if (value > t[0]):
return(table[0:index+1] + [value] + table[index+1::])
#endif
#endif
index = old_div(len(t), 2)
t = t[0:index] if (value < t[index]) else t[index::]
#endwhile
return([])
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
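#
# example_cache_usage
#
# A minimal usage sketch (not called anywhere) for lisp_cache with a
# stand-in entry class; real callers store lisp_mapping, lisp_site_eid,
# and similar objects, all of which carry an .eid used for longest-match
# lookups. lisp_address is defined later in this file.
#
class example_cache_entry(object):
    def __init__(self, eid):
        self.eid = eid
    #enddef
#endclass

def example_cache_usage():
    cache = lisp_cache()
    prefix = lisp_address(LISP_AFI_IPV4, "10.0.0.0", 8, 0)
    cache.add_cache(prefix, example_cache_entry(prefix))

    #
    # Longest-match lookup: a host inside 10.0.0.0/8 finds the entry.
    #
    host = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
    return(cache.lookup_cache(host, False))
#enddef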
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
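#
# example_map_cache_lookups
#
# A minimal usage sketch (not called anywhere): a unicast lookup passes
# source and destination EIDs, and a multicast destination triggers the
# second-stage source lookup inside the (S,G) entry. The addresses are
# made-up examples.
#
def example_map_cache_lookups():
    s = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
    d = lisp_address(LISP_AFI_IPV4, "10.2.2.2", 32, 0)
    g = lisp_address(LISP_AFI_IPV4, "224.1.1.1", 32, 0)
    unicast_mc = lisp_map_cache_lookup(s, d)
    multicast_mc = lisp_map_cache_lookup(s, g)
    return(unicast_mc, multicast_mc)
#enddef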
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
    # (*, 224.1.1.1/32). Since the source lookup will be performed below and
    # not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
    #
    # So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
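#
# example_store_ipv6_prefix
#
# A minimal sketch (not called anywhere) related to the high-order-byte
# note above: storing ff00::/8 (rather than ff::/8) puts 0xff in the
# high-order byte of the 128-bit quantity, as explained in
# lisp_address.store_address() below.
#
def example_store_ipv6_prefix():
    prefix = lisp_address(LISP_AFI_IPV6, "", 0, 0)
    prefix.store_prefix("ff00::/8")
    return(prefix.print_prefix())
#enddef
#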
class lisp_address(object):
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = b""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += (self.address + "\0").encode()
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
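#
# Example output of print_prefix() (illustrative values, not from the
# original source): an IPv4 EID-prefix in instance-ID 1000 prints as
# "[1000]10.0.0.0/8", the ultimate root prints as "[*]", and an IID
# range with instance-ID 16 and mask-length 28 prints as "[16-31]".
#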
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1].encode()), 16)
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1].encode()), 16)
#endif
return(addr1 ^ addr2)
#enddef
#
# Return True if self is more specific than, or equal to, the prefix
# supplied in variable 'prefix'.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
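#
# Worked example (a sketch, not from the original source) of the numeric
# case above: is [0]10.1.0.0/16 more specific than [0]10.0.0.0/8?
#
#   shift = 4 * 8 - 8 = 24
#   mask  = (2**8 - 1) << 24 = 0xff000000
#   0x0a010000 & 0xff000000 == 0x0a000000  -> True
#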
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
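#
# As an illustration (assumed values, not part of the original encoder),
# an Instance-ID LCAF for IID 1000 wrapping an IPv4 EID packs the header
# shown in the format diagram above like this ("4 + n" is 4 + 2 + 4 for
# an IPv4 address, and ipv4_address_bytes is a placeholder):
#
#   lcaf  = struct.pack("BBBBH", 0, 0, 2, 0, socket.htons(4 + 2 + 4))
#   lcaf += struct.pack("IH", socket.htonl(1000), socket.htons(1))
#   lcaf += ipv4_address_bytes
#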
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate that enough length remains and
# that there are bytes left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and group address. Validate that enough length remains and
# that there are bytes left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = group.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node(object):
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp(object):
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to give
# it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo(object):
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0) # referenced by decode_geo()
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
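#
# Worked example (a sketch, not from the original source): parsing
# "46-33-20-N-6-34-56-E-100/20" yields latitude -46, lat_mins 33,
# lat_secs 20 (negative means North), longitude -6, long_mins 34,
# long_secs 56 (negative means East), altitude 100, and radius 20.
#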
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
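#
# Worked example (illustrative values): with the stored fields from
# "46-33-20-N" above, dms_to_decimal() computes
#
#   dd = 46 + (33 * 60 + 20) / 3600 = 46.556
#
# and since the stored value -46 is not positive, the sign stays
# positive, i.e. North/East map to positive decimal degrees.
#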
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = geopy.distance.distance(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = old_div(((lat_hi << 16) | socket.ntohs(lat_ms)), 1000)
self.lat_mins = old_div(lat_secs, 60)
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = old_div(((lon_hi << 16) | socket.ntohs(lon_ms)), 1000)
self.long_mins = old_div(lon_secs, 60)
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node(object):
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle(object):
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
rle_name_str = "({})".format(rle_name_str)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}{}, ".format(addr_str, "" if port == 0 else \
":" + str(port), rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json(object):
def __init__(self, name, string, encrypted=False, ms_encrypt=False):
#
# Deal with py3.
#
if (type(string) == bytes): string = string.decode()
self.json_name = name
self.json_encrypted = False
try:
json.loads(string)
except:
lprint("Invalid JSON string: '{}'".format(string))
string = '{ "?" : "?" }'
#endtry
self.json_string = string
#
# Decide to encrypt or decrypt. The map-server encrypts and stores
# ciphertext in mapping system. The lig client decrypts to show user
# data if it has the key in env variable LISP_JSON_KEY. Format of
# env variable is "<key>" or "[<key-id>]<key>".
#
# If the LISP site-eid is not configured to encrypt the JSON, then
# store it in plaintext.
#
if (len(lisp_ms_json_keys) != 0):
if (ms_encrypt == False): return
self.json_key_id = list(lisp_ms_json_keys.keys())[0]
self.json_key = lisp_ms_json_keys[self.json_key_id]
self.encrypt_json()
#endif
if (lisp_log_id == "lig" and encrypted):
key = os.getenv("LISP_JSON_KEY")
if (key != None):
index = -1
if (key[0] == "[" and "]" in key):
index = key.find("]")
self.json_key_id = int(key[1:index])
#endif
self.json_key = key[index+1::]
#endif
self.decrypt_json()
#endif
#endif
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (self.json_name in lisp_json_list):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
def encrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = jd[key]
if (type(value) != str): value = str(value)
value = chacha.ChaCha(ekey, iv).encrypt(value)
jd[key] = binascii.hexlify(value)
#endfor
self.json_string = json.dumps(jd)
self.json_encrypted = True
#enddef
def decrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = binascii.unhexlify(jd[key])
jd[key] = chacha.ChaCha(ekey, iv).encrypt(value)
#endfor
try:
self.json_string = json.dumps(jd)
self.json_encrypted = False
except:
pass
#endtry
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats(object):
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-12] + "." + count[-12:-9] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int(old_div((self.packet_count - last_packets),
rate_diff))
bit_rate = old_div((self.byte_count - last_bytes), rate_diff)
bit_rate = old_div((bit_rate * 8), 1000000)
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Any time a new
# lisp_packet.packet_error value is added, a matching key string must be
# added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
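#
# Illustration (a sketch, not from the original source): the decap path
# bumps one of these counters per packet, keyed by the packet-error
# string, for example:
#
#   lisp_decap_stats["good-packets"].increment(len(packet))
#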
#
# This is a locator record definition, as defined in the LISP RFCs.
#
class lisp_rloc(object):
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = lisp_get_timestamp()
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.rloc_probe_latency = "?/?"
self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
self.multicast_rloc_probe_list = {}
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = copy.deepcopy(self)
hop.next_rloc = None # don't drag along the chain built so far
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state == LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state == LISP_RLOC_UP_STATE):
return("up-state")
if (self.state == LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state == LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state == LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < old_div(LISP_RLOC_PROBE_TTL, 2)):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < old_div(LISP_RLOC_PROBE_TTL, 2)):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def store_rloc_probe_latencies(self, json_telemetry):
tel = lisp_decode_telemetry(json_telemetry)
fl = round(float(tel["etr-in"]) - float(tel["itr-out"]), 3)
rl = round(float(tel["itr-in"]) - float(tel["etr-out"]), 3)
last = self.rloc_probe_latency
self.rloc_probe_latency = str(fl) + "/" + str(rl)
last_list = self.recent_rloc_probe_latencies
self.recent_rloc_probe_latencies = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_latency(self):
return(self.rloc_probe_latency)
#enddef
def print_recent_rloc_probe_latencies(self):
latencies = str(self.recent_rloc_probe_latencies)
return(latencies)
#enddef
def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
#
# Compute RTTs.
#
rloc.last_rloc_probe_reply = ts
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
#
# Store hops.
#
rloc.store_rloc_probe_hops(hc, ttl)
#
# Store one-way latency if telemetry data json in Map-Reply.
#
if (jt): rloc.store_rloc_probe_latencies(jt)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
lat = bold(rloc.print_rloc_probe_latency(), False)
lat = ", latency {}".format(lat) if jt else ""
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}{}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hc) + "/" + str(ttl), lat))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping(object):
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.register_ttl = LISP_REGISTER_TTL
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
self.subscribed_eid = None
self.subscribed_group = None
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(old_div(ttl, 3600)) + " hours"
else:
ttl = str(old_div(ttl, 60)) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
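#
# Examples of print_ttl() output (illustrative TTLs, in seconds):
# 7200 -> "2 hours", 5400 -> "90 mins", 120 -> "2 mins", 90 -> "90 secs".
#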
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
# Take the uptime modulo TTL and refresh the entry only during the
# first few seconds of each TTL interval.
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. Refresh the entry once 90% of the TTL has
# elapsed.
#
almost_ttl = self.map_cache_ttl - (old_div(self.map_cache_ttl, 10))
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with the best priority, put it in the best-rloc set
# for the data-plane. For each unreachable RLOC that has a better
# priority than the best computed above, we want to RLOC-probe, so put
# it in both the RLOC-probe list and the best list. We must set the
# last_rloc_probe timestamp or lisp_process_rloc_probe_timer() will
# think the unreachable RLOC went down waiting for an RLOC-probe reply
# that will never arrive.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put the RLOC in lisp.lisp_rloc_probe_list if it isn't already there.
# And if we removed an RLOC from the best list, we need to remove its
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12:i+13])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
# If this RLOC was taken out of up state because echoed-nonces stopped
# arriving, try requesting a nonce again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (rloc.rle_name in lisp_rle_list):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
class lisp_dynamic_eid(object):
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping(object):
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
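#
# Example usage (illustrative values, not from the original source):
# with a group mapping whose group_prefix is [0]224.0.0.0/4,
# lisp_is_group_more_specific("224.1.1.1", gm) returns 4 and
# lisp_is_group_more_specific("10.1.1.1", gm) returns -1.
#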
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in list(lisp_group_mapping_list.values()):
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site(object):
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid(object):
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
self.encrypt_json = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in list(self.individual_registrations.values()):
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
exact_match = False
for re in new_list:
if (re.rloc.is_null()): continue
exact_match = rloc_entry.rloc.is_exact_match(re.rloc)
if (exact_match): break
#endfor
if (exact_match == False): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in list(self.individual_registrations.values()):
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (addr in new_rle): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (list(old_rle.keys()) == list(new_rle.keys())): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
list(old_rle.keys()), list(new_rle.keys())))
return(True)
#enddef
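#
# Illustrative merge (hypothetical addresses): if one ETR registers an RLE
# node 10.1.1.1 and another registers 10.2.2.2 for the same (S,G), the
# merged registration carries a single RLE listing both nodes, each tagged
# with its registering ETR's rloc_name; merge_rles_in_site_eid() returns
# True only when the set of RLE addresses actually changed.
#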
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special-case details on
# longest-match lookups for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
self.encrypt_json = parent.encrypt_json
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr(object):
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no
# longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, create a lisp_mr() for
# each additional A-record. Only the master entry (A-record index 0)
# does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in list(lisp_map_resolvers_list.values()):
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
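#
# Illustrative behavior (hypothetical name): if "mr.example.com" resolves
# to three A-records, the master lisp_mr() (a_record_index 0) tracks the
# first address and, for a LISP-Decent suffix, spawns lisp_mr() entries
# for the second and third; entries whose addresses disappear from the
# A-record set are deleted on a later resolve.
#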
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (key not in lisp_map_resolvers_list): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root(object):
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral(object):
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in list(self.referral_set.values()):
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node(object):
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms(object):
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no
# longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, create a lisp_ms() for
# each additional A-record. Only the master entry (A-record index 0)
# does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in list(lisp_map_servers_list.values()):
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (key not in lisp_map_servers_list): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface(object):
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime(object):
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" or month > "12"): return(False)
if (day < "01" or day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" or hour > "23"): return(False)
if (mi < "00" or mi > "59"): return(False)
if (sec < "00" or sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
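#
# Minimal usage sketch (assumed datetime string format
# "YYYY-MM-DD-HH:MM:SS"):
#
#   >>> dt = lisp_datetime("2021-06-01-12:30:00")
#   >>> dt.valid_datetime()
#   True
#   >>> dt.datetime
#   20210601123000
#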
#
# Policy data structures.
#
class lisp_policy_match(object):
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#endclass
class lisp_policy(object):
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if (name in lisp_geo_list) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if (name in lisp_elp_list) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if (name in lisp_rle_list) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if (name in lisp_json_list) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub(object):
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
self.eid_prefix = None
#enddef
def add(self, eid_prefix):
self.eid_prefix = eid_prefix
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (eid not in lisp_pubsub_cache):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (self.xtr_id in pubsub):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (eid in lisp_pubsub_cache):
pubsub = lisp_pubsub_cache[eid]
if (self.xtr_id in pubsub):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
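#
# Illustrative cache shape (hypothetical values): lisp_pubsub_cache is
# keyed by EID-prefix string, and each value is a dictionary keyed by
# xtr-id:
#
#   lisp_pubsub_cache["10.0.0.0/8"][0x1122334455667788] -> lisp_pubsub()
#
# add() replaces any existing state for the same xtr-id; delete() removes
# only the entry whose xtr-id matches.
#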
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace(object):
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
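#
# Minimal encode/decode sketch (illustrative): encode() emits a first long
# of 0x90000000 (Type=9) in network byte-order, a zero address/port long,
# the 8-byte nonce, and the JSON payload. Running decode() over that
# buffer returns True and repopulates local_rloc, local_port, nonce, and
# packet_json from the wire format shown above.
#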
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in list(lisp_map_servers_list.values()):
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in list(lisp_map_servers_list.values()): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in list(lisp_map_resolvers_list.values()):
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm.
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in list(lisp_map_resolvers_list.values()):
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
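#
# Illustrative lookup (hypothetical suffix): with lisp_decent_dns_suffix
# "pull.example.com" and lisp_get_decent_index(eid) returning 7, the
# Map-Resolver whose dns_name is "7.pull.example.com" is selected, least
# recently used first.
#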
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Check for an IGMP packet first. For IGMP, skip the IP checksum
# verification and the TTL test.
#
if (ord(packet[9:10]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
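#
# Illustrative outcome (assumed header values): a packet arriving with
# TTL 5 is returned with TTL 4 and a recomputed header checksum; packets
# arriving with TTL 0 or 1 are discarded and [False, None] is returned;
# IGMP packets (protocol 2) short-circuit as [True, packet].
#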
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(dest):
now = lisp_get_timestamp()
#
# Do we have rate-limiting disabled temporarily?
#
elapsed = now - lisp_no_map_request_rate_limit
if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
dprint("No Rate-Limit Mode for another {} secs".format(left))
return(False)
#endif
#
# Did we send a Map-Request recently?
#
if (lisp_last_map_request_sent == None): return(False)
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format( \
green(dest.print_address(), False), round(elapsed, 3)))
#endif
return(rate_limit)
#enddef
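#
# Illustrative timeline (assuming LISP_MAP_REQUEST_RATE_LIMIT is N secs):
# a data-triggered Map-Request sent at time t suppresses further ones
# until t+N, unless the no-rate-limit window
# (LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME) is still open, in which case
# False is always returned and the Map-Request is sent.
#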
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc,
pubsub=False):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
# RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
# structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
map_request.subscribe_bit = pubsub
map_request.xtr_id_present = pubsub
#
# Hold the request nonce so we can match replies from xTRs that have
# multiple RLOCs; the reply's source address may not be the probed
# destination. In our ETR implementation, the probe request destination
# can be seen in the lisp-core, lisp-etr, or lisp-rtr process.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for Map-Request rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so the lisp-itr and lisp-etr processes don't
# both add and delete host routes (for Info-Request sending purposes) at
# the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# Wait for the host route to go away before proceeding. We use the
# map-server host route as an IPC lock. For the data port, only the
# lisp-etr process adds a host route to the RTR for Info-Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from it
# specially so we can proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(list(rtr_list.values()))):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), list(rtr_list.keys())))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
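#
# Illustrative result (hypothetical RTR set): for instance-ID 0 and RTRs
# {"1.1.1.1", "2.2.2.2"}, this installs map-cache entries 0.0.0.0/0,
# 0::/0, and their multicast counterparts, each with an RLOC-set of the
# two RTRs at unicast priority 254 and a map_cache_ttl of
# LISP_MR_TTL * 60.
#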
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (addr_str in lisp_rtr_list):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in list(lisp_iid_to_interface.keys()):
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
#
# This function is currently disabled; remove the following return to
# re-enable Map-Resolver RTT testing.
#
return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer
# expiry. If the entry does not time out, go through the RLOC-set to see
# if the encapsulation port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
last_refresh_time = mc.last_refresh_time
#
# If mapping system runs on this system, disregard packet activity.
# There could be a race condition for active sources, where destinations
# are not registered yet due to system restart. If the LISP subsystem
# is within 5 minutes of restarting, time out native-forward entries.
#
if (lisp_is_running("lisp-ms") and lisp_uptime + (5*60) >= now):
if (mc.action == LISP_NATIVE_FORWARD_ACTION):
last_refresh_time = 0
lprint("Remove startup-mode native-forward map-cache entry")
#endif
#endif
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
ut = lisp_print_elapsed(mc.uptime)
lrt = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint(("Map-cache entry {} {}, had uptime {}, last-refresh-time {}"). \
format(green(prefix_str, False), bold("timed out", False), ut, lrt))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from the map-cache all the timed-out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} dictionary is keyed by Info-Request hostname
# and holds an array of lisp_nat_info() values. We keep all current and
# previous NAT state associated with the hostname so we can track how
# much movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (hostname not in lisp_nat_state_info):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
# So the youngest entry is not the newest entry. See if it exists as
# an old entry. If not, we prepend the new state, otherwise, we prepend
# the new state and remove the old state from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
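#
# Illustrative state (hypothetical values): after an ETR "etr-1" behind a
# NAT re-registers from a new translated address, the state looks like:
#
#   lisp_nat_state_info["etr-1"] = [<new lisp_nat_info>, <old lisp_nat_info>]
#
# with the youngest entry always first; a refresh of the youngest entry
# only updates its uptime and returns False.
#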
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (hostname not in lisp_nat_state_info): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request out a specific
# interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in list(lisp_map_resolvers_list.values()):
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in list(lisp_map_servers_list.values()):
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
    # Send Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in list(lisp_map_resolvers_list.values()):
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We validate IPv4, IPv6,
# MAC, geo-prefix, E.164, and distinguished-name formats.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
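#
# Hypothetical examples of what the checks above accept and reject:
#
# lisp_valid_address_format("address", "192.168.1.1") -> True
# lisp_valid_address_format("address", "192.168.1.256") -> False
# lisp_valid_address_format("address", "fe80::1") -> True
# lisp_valid_address_format("address", "0000-1111-2222") -> True (MAC)
# lisp_valid_address_format("address", "+14085551212") -> True (E.164)
# lisp_valid_address_format("address", "'printer-eid'") -> True (dist-name)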
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
# Variable "data_structure" is a string and not a byte string. Caller converts.
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache-summary"):
data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
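#
# For example (hypothetical parameters), a request for one map-cache entry
# arrives as the string:
#
# 'map-cache%{"instance-id" : "0", "eid-prefix" : "10.0.0.0/8"}'
#
# and a request for the entire map-cache as simply "map-cache%".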
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = lisp_fill_rloc_in_json(rloc)
#
# If this is a multicast RLOC, then add the array for member RLOCs
# that may have responded to a multicast RLOC-probe.
#
if (rloc.rloc.is_multicast_address()):
r["multicast-rloc-set"] = []
for mrloc in list(rloc.multicast_rloc_probe_list.values()):
mr = lisp_fill_rloc_in_json(mrloc)
r["multicast-rloc-set"].append(mr)
#endfor
#endif
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_fill_rloc_in_json
#
# Fill in fields from lisp_rloc() into the JSON that is reported via the
# restful API.
#
def lisp_fill_rloc_in_json(rloc):
r = {}
addr_str = None
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
addr_str = r["address"]
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
addr_str += ":" + r["encap-port"]
#endif
if (addr_str and addr_str in lisp_crypto_keys_by_rloc_encap):
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key != None and key.shared_key != None):
r["encap-crypto"] = "crypto-" + key.cipher_suite_string
#endif
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
r["rloc-probe-latency"] = rloc.rloc_probe_latency
r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
return(r)
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if ("group-prefix" in parms):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache_summary
#
# Returns:
#
# [ { "site" : '<site-name>", "registrations" : [ {"eid-prefix" : "<eid>",
# "count" : "<count>", "registered-count" : "<registered>" }, ... ]
# } ]
#
def lisp_process_api_site_cache_summary(site_cache):
site = { "site" : "", "registrations" : [] }
entry = { "eid-prefix" : "", "count" : 0, "registered-count" : 0 }
sites = {}
for ml in site_cache.cache_sorted:
for se in list(site_cache.cache[ml].entries.values()):
if (se.accept_more_specifics == False): continue
if (se.site.site_name not in sites):
sites[se.site.site_name] = []
#endif
e = copy.deepcopy(entry)
e["eid-prefix"] = se.eid.print_prefix()
e["count"] = len(se.more_specific_registrations)
for mse in se.more_specific_registrations:
if (mse.registered): e["registered-count"] += 1
#endfor
sites[se.site.site_name].append(e)
#endfor
#endfor
data = []
for site_name in sites:
s = copy.deepcopy(site)
s["site"] = site_name
s["registrations"] = sites[site_name]
data.append(s)
#endfor
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this site-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver information to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if ("dns-name" in data) else None
if ("address" in data):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in list(lisp_map_servers_list.values()):
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in list(lisp_map_resolvers_list.values()):
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of configured database-mappings, including dynamic data like
# translated_rloc.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if ("group-prefix" in parms):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (device in lisp_myinterfaces):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
    # return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
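#
# A hypothetical example of the best-match rule above: with multi-tenant
# EID-prefixes [1000]10.1.0.0/16 and [2000]10.1.1.0/24 both configured on
# device eth0 (and each "lisp interface" clause's instance-id matching its
# prefix), a source EID of 10.1.1.5 best-matches the /24 and returns
# instance-ID 2000, while 10.1.2.5 matches only the /16 and returns 1000.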
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (device not in lisp_myinterfaces): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
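#
# A hypothetical sample of the output produced above, where each list key
# is "<rloc>" or "<rloc>:<port>" and each tuple is [id(rloc), EID-prefix,
# group-prefix, translated-port]:
#
# ----- RLOC-probe-list -----
# RLOC 128.9.0.1:30001:
#   [0x7f8a2c3d, [0]10.1.0.0/16, [0]224.1.1.1/32, 30001]
# ---------------------------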
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in list(lisp_rloc_probe_list.values()):
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
        # same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
#
# Copy last-rloc send probe timer, so all EIDs using the
# same RLOC can have sync'ed rtts.
#
parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
            # Suppress sending an RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr= red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
        # Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
    # When the xtr-parameter indicates to register all RTRs, we are doing it
    # unconditionally, so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
    # Check if the RTR address is in the RTR-list the lisp-itr process learned
    # from the map-server.
#
if (rtr_str not in lisp_rtr_list): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
mrloc):
rloc = rloc_entry.rloc
nonce = map_reply.nonce
hc = map_reply.hop_count
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
jt = rloc_entry.json.json_string if rloc_entry.json else None
ts = lisp_get_timestamp()
#
# If this RLOC-probe reply is in response to a RLOC-probe request to a
# multicast RLOC, then store all responses. Create a lisp_rloc() for new
# entries.
#
if (mrloc != None):
multicast_rloc = mrloc.rloc.print_address_no_iid()
if (map_reply_addr not in mrloc.multicast_rloc_probe_list):
nrloc = lisp_rloc()
nrloc = copy.deepcopy(mrloc)
nrloc.rloc.copy_address(rloc)
nrloc.multicast_rloc_probe_list = {}
mrloc.multicast_rloc_probe_list[map_reply_addr] = nrloc
#endif
nrloc = mrloc.multicast_rloc_probe_list[map_reply_addr]
nrloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
nrloc.last_rloc_probe = mrloc.last_rloc_probe
r, eid, group = lisp_rloc_probe_list[multicast_rloc][0]
nrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
return
#endif
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sending sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (addr not in pl):
addr += ":" + str(port)
if (addr not in pl):
addr = source_addr
if (addr not in pl):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr):
if (rloc.translated_port != 0 and rloc.translated_port != port):
continue
#endif
#endif
rloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
    #endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (rloc_str in lisp_nonce_echo_list):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = b""
while(packet[0:1] != b"\x00"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name.decode())
#enddef
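#
# A minimal sketch of the wire format parsed above: the name is a run of
# bytes terminated by a single zero byte. For example (hypothetical input):
#
# lisp_decode_dist_name(b"ns1.example\x00<rest-of-packet>")
# -> (b"<rest-of-packet>", "ns1.example")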
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of 4-element flow entries; the
# last element is a lisp_packet and the first three are passed to its
# print_flow() method. This function is called and run in its own thread and
# then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in list(kv_pair.keys()):
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
    # Derive MAC address from VTEP address and associate it with the next-hop
    # address on vlan4094. This MAC address must be the MAC address on the
    # foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
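#
# To illustrate the algorithmic VTEP-to-MAC mapping assumed above: for a
# hypothetical VTEP RLOC of 10.17.34.51, the low three octets (17.34.51)
# map to virtual-router MAC 00:00:00:11:22:33 (Arista form 0000.0011.2233),
# which is what "ip virtual-router mac-address" must configure on the
# foreign VTEP.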
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
global lisp_no_map_request_rate_limit
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Clear rate-limiting temporarily.
#
lisp_no_map_request_rate_limit = lisp_get_timestamp()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT, so we have to data encapsulate it with a source port of 4341
# and a destination address and port that was translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
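#
# The shell commands issued above look like this for a hypothetical RLOC
# 128.9.0.1 and next-hop 192.168.1.1:
#
# ip route add 128.9.0.1/32 via 192.168.1.1      (install is True)
# ip route delete 128.9.0.1/32 via 192.168.1.1   (install is False)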
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
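#
# A hypothetical checkpoint entry for EID-prefix [1000]10.0.0.0/8 with two
# RLOCs, and a native-forward entry, would be written as:
#
# [1000]10.0.0.0/8 rloc 1.1.1.1 1 50, 2.2.2.2 1 50
# [1000]20.0.0.0/8 rloc native-forward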
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write an IPC record to the lisp-ipc-data-plane socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
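#
# A sketch of the JSON record written above for a unicast map-cache entry
# (addresses hypothetical, crypto keys omitted):
#
# { "type" : "map-cache", "opcode" : "add", "eid-prefix" : "10.0.0.0/8",
#   "instance-id" : "0", "rlocs" : [ { "rloc" : "128.9.0.1",
#   "priority" : "1", "weight" : "50", "port" : "4341" } ] }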
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write database-mapping records to the ipc-data-
# plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format. Store encryption key.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
    # Write the ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write interface records to the ipc-data-plane
# socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format. Store encryption key.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in list(lisp_myinterfaces.values()):
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} where the keys of the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
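#
# Hypothetical examples of the three forms parsed above:
#
# lisp_parse_auth_key("my-password") -> {0 : "my-password"}
# lisp_parse_auth_key("[3]my-password") -> {3 : "my-password"}
# lisp_parse_auth_key("[1]foo[2]bar") -> {1 : "foo", 2 : "bar"}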
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
    # Initialize list if first fragment. Indexed by IPv4 Ident.
#
if (ident not in lisp_reassembly_queue):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
    # Do not queue fragment if first fragment arrived and we determined it's
    # not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
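#
# Standalone sketch (hypothetical helper, not called from above): how the
# IPv4 fragment fields used by lisp_reassemble() are decoded. The
# frag-offset word mixes flag bits with an offset in 8-byte units.
#
def _frag_fields_sketch(ip_header):
    fo = socket.ntohs(struct.unpack("H", ip_header[6:8])[0])
    more_frags = (fo & 0x2000) != 0
    offset_in_bytes = (fo & 0x1fff) * 8
    return([more_frags, offset_in_bytes])
#enddef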
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)
addr_str = addr.print_address_no_iid()
if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)
#
# We are at a non-NAT-based xTR. We need to get the keys from an RTR
# or another non-NAT-based xTR. Move the addr:port entry to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
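#
# Lookup-precedence example (hypothetical RLOC): for addr 10.0.0.1 and port
# 4341, "10.0.0.1:4341" is tried first, then "10.0.0.1", and finally any
# "10.0.0.1:<other-port>" entry has its keys copied to "10.0.0.1".
#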
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (addr in lisp_rloc_probe_list): return(addr)
#
# Have to check the NAT cache to see if the RLOC is translated. If not,
# this is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal, but this happens rarely.
#
for nat_info in list(lisp_nat_state_info.values()):
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endfor
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
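#
# Note on the magic numbers above (per the RFC 6830 header layout): the
# message type is the high-order nibble of the first byte, so 0x12 is a
# Type-1 Map-Request with the probe (P) bit set and 0x28 is a Type-2
# Map-Reply with the probe (P) bit set.
#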
#
# lisp_is_rloc_probe
#
# If this is an RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9:10])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28:29])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28:29])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28:29])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28:29])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is an RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8:9])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if ("entries" not in msg):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if ("eid-prefix" not in msg):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if ("instance-id" not in msg):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if ("rlocs" not in msg):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if ("rloc" not in ipc_rloc): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ("packet-count" not in ipc_rloc) else \
ipc_rloc["packet-count"]
bc = 0 if ("byte-count" not in ipc_rloc) else \
ipc_rloc["byte-count"]
ts = 0 if ("seconds-last-packet" not in ipc_rloc) else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
# Variable "msg" is a string and not a byte string. Caller converts.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it received a json string from the ITR so it needs
# to convert to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if (key_name not in msg) else msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if (key_name not in msg) else msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if (key_name not in msg) else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The JSON messages come
# in the types "discovery", "restart", "statistics", and "decap-
# statistics". This function dispatches to handlers for the stats and
# restart types and itself processes the discovery type:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if ("type" not in msg):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if ("interface" not in msg):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if ("source-eid" in msg):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if ("dest-eid" in msg):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
# lisp_itr_discover_eid() takes an IPC socket as its fifth argument;
# pass the punt socket (assumed here to be the ITR's IPC listen socket)
# so the lisp-etr process can be told to register the dynamic-EID.
lisp_itr_discover_eid(db, seid, device, interface, punt_socket)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or lisp_mr_or_pubsub(mc.action)):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(deid)): return
pubsub = (mc and mc.action == LISP_SEND_PUBSUB_ACTION)
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None, pubsub)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). Then walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (eid_str in db.dynamic_eids):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function, but it is hardly
# used, and once it is used for a particular addr_str, it shouldn't be needed
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
#endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if the configured LISP-Decent modulus is non-zero, meaning we
# are using the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
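#
# Example (hypothetical names): with lisp_decent_dns_suffix configured as
# "lisp-decent.example.com", passing "3.lisp-decent.example.com" returns
# True and passing "lisp.example.com" returns False.
#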
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value. We
# do a sha256() over a string representation of "[<iid>]<eid>", take the
# high-order 6 bytes from the hash and do the modulus on that value.
#
# The key for the sha256 HMAC is the string "lisp-decent".
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hmac.new(b"lisp-decent", eid_str.encode(),
hashlib.sha256).hexdigest()
#
# Get hash-length to modulate from LISP_DECENT_HASH_WIDTH in bytes.
#
hash_width = os.getenv("LISP_DECENT_HASH_WIDTH")
if (hash_width in ["", None]):
hash_width = 12
else:
hash_width = int(hash_width)
if (hash_width > 32):
hash_width = 12
else:
hash_width *= 2
#endif
#endif
mod_value = hash_value[0:hash_width]
index = int(mod_value, 16) % lisp_decent_modulus
lprint("LISP-Decent modulus {}, hash-width {}, mod-value {}, index {}". \
format(lisp_decent_modulus, old_div(hash_width, 2) , mod_value, index))
return(index)
#enddef
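#
# Standalone sketch of the computation above (hypothetical EID string and
# modulus; the real code takes a lisp_address and uses the configured
# lisp_decent_modulus):
#
def _decent_index_sketch(eid_prefix_str, modulus, hash_width_bytes=6):
    digest = hmac.new(b"lisp-decent", eid_prefix_str.encode(),
        hashlib.sha256).hexdigest()
    return(int(digest[0:hash_width_bytes*2], 16) % modulus)
#enddef
#
# Example: _decent_index_sketch("[1000]10.1.0.0/16", 4) returns an index in
# range(4), selecting one of 4 LISP-Decent map-server names.
#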
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is passed as a string. Build an internal lisp_address()
# to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display port if the caller is an encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["n"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["sr"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["n"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["sr"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed[0] + "ts"
entry[key] = lisp_get_timestamp()
#
# If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["n"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["dr"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["dr"] += " ({})".format(reason)
#endif
#
# Add recent-rtts, recent-hops, and recent-latencies.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
entry["lats"] = rloc_entry.recent_rloc_probe_latencies
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["se"] = seid
rec["de"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
# Search for record. If we are appending the first ITR node entry, get its
# RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["de"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["n"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["se"] = deid
rec["de"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
# Otherwise we are forwarding a packet that is about to be encapsulated or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["sr"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["n"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and IP
# header fields changes are all reflected in new IPv4 header checksum.
#
# The DF-bit may also need clearing because we may have to fragment as the
# packet grows with trace data (that fix-up is currently commented out
# below).
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
# df_flags = struct.unpack("B", headers[6:7])[0] & 0xbf
# headers = headers[0:6] + struct.pack("B", df_flags) + headers[7::]
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
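#
# For reference, a one-hop trace record as assembled above looks like this
# (all values hypothetical; "ets"/"dts" are encap/decap timestamps):
#
# [ { "se" : "10.0.1.1", "de" : "10.0.2.2", "paths" :
#     [ { "n" : "ITR", "sr" : "192.0.2.1:64000", "hn" : "itr-host",
#         "ets" : <timestamp>, "dr" : "203.0.113.9:4341" } ] } ]
#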
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mapping array to see if we should glean the EID and
# RLOC. Find first match. Return False if there are no configured glean
# mappings. The second return value is either True or False depending if the
# matched entry was configured to RLOC-probe the RLOC for the gleaned entry.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if ("instance-id" in entry):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if ("eid-prefix" in entry):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if ("group-prefix" in entry):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if ("rloc-prefix" in entry):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
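#
# Example (hypothetical policy): a lisp_glean_mappings entry of the form
#
#   { "instance-id" : (1000, 2000), "eid-prefix" : <10.0.0.0/8>,
#     "rloc-probe" : True, "igmp-query" : False }
#
# matches EID 10.1.1.1 in instance-ID 1500 and returns (True, True, False).
#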
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (seid_str not in lisp_gleaned_groups):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (seid_str in lisp_gleaned_groups):
if (group_str in lisp_gleaned_groups[seid_str]):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (seid_str not in lisp_gleaned_groups): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Report types 0x12, 0x16, and 0x22 signal joins; type 0x17 is a leave.
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array when it is a report. The caller must check whether it has context
# to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0:1])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
igmp_type = struct.unpack("B", igmp[0:1])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
(igmp_type in igmp_types) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22).
#
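# (For an IGMPv3 report, the word parsed into group.address above is the
# reserved field plus the group-record count, so it is reused here.)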
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
if (len(igmp) < group_size): return
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (record_type not in lisp_igmp_record_types):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_IS_EXCLUDE (2) or CHANGE_TO_EXCLUDE (4) with no sources is a join.
# ALLOW_NEW_SOURCES (5) is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records)..
#
for j in range(source_count):
if (len(igmp) < source_size): return
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return (S,G) entries to return to call to send a Map-Register.
# They are put in a multicast Info LCAF Type with ourselves as an RLE.
# This is spec'ed in RFC 8378.
#
return(register_entries)
#enddef
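#
# Return-value examples (hypothetical groups): an IGMP query returns True.
# An IGMPv2 join of 224.1.1.1 returns [[None, "224.1.1.1", True]] and an
# IGMPv3 source-record join returns [["10.0.0.9", "224.1.1.1", True]]; a
# False third element means leave.
#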
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Adding RLOC to new map-cache entry or updating RLOC for existing entry..
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group.
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#
# lisp_is_json_telemetry
#
# Return a dictionary array if the json string has the following two
# key/value pairs in it. Otherwise, return None.
#
# { "type" : "telemetry", "sub-type" : "timestamps" }
#
def lisp_is_json_telemetry(json_string):
try:
tel = json.loads(json_string)
if (type(tel) != dict): return(None)
except:
lprint("Could not decode telemetry json: {}".format(json_string))
return(None)
#endtry
if ("type" not in tel): return(None)
if ("sub-type" not in tel): return(None)
if (tel["type"] != "telemetry"): return(None)
if (tel["sub-type"] != "timestamps"): return(None)
return(tel)
#enddef
#
# lisp_encode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And fill in timestamps for the 4 fields. Input to this function is a string.
#
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return(json_string)
if (tel["itr-in"] == "?"): tel["itr-in"] = ii
if (tel["itr-out"] == "?"): tel["itr-out"] = io
if (tel["etr-in"] == "?"): tel["etr-in"] = ei
if (tel["etr-out"] == "?"): tel["etr-out"] = eo
json_string = json.dumps(tel)
return(json_string)
#enddef
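#
# Example (hypothetical timestamp): passing the template above with all four
# fields set to "?" and io=1234.5 returns the same JSON string with
# "itr-out" : 1234.5 filled in and the other three fields still "?".
#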
#
# lisp_decode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And return values in a dictionary array. Input to this function is a string.
#
def lisp_decode_telemetry(json_string):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return({})
return(tel)
#enddef
#
# lisp_telemetry_configured
#
# Return JSON string template of telemetry data if it has been configured.
# If it has been configured we'll find a "lisp json" command with json-name
# "telemetry". If found, return the json string. Otherwise, return None.
#
def lisp_telemetry_configured():
if ("telemetry" not in lisp_json_list): return(None)
json_string = lisp_json_list["telemetry"].json_string
if (lisp_is_json_telemetry(json_string) == None): return(None)
return(json_string)
#enddef
#
# lisp_mr_or_pubsub
#
# Test action for Map-Request or Map-Request with Subscribe bit set.
#
def lisp_mr_or_pubsub(action):
return(action in [LISP_SEND_MAP_REQUEST_ACTION, LISP_SEND_PUBSUB_ACTION])
#enddef
#------------------------------------------------------------------------------
|
compareRegressionResults.py | #!/usr/bin/env python
# author: Alvaro Gonzalez Arroyo
# email: <alvaro.g.arroyo@gmail.com>
# description: a basic script to compare results of several Jenkins executions
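# usage (hypothetical job URLs):
#   python compareRegressionResults.py \
#     --local http://jenkins.example.com/job/local-job/101 \
#     --reference http://jenkins.example.com/job/ref-job/202 --compare 5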
import urllib2
import argparse
import ast
import re
import os
import threading
import sys
from collections import defaultdict
from display.table import Table
from display.console_colors import color_ok, color_error
from display.unbuffered import Unbuffered
def main():
parser = argparse.ArgumentParser(description="Show the test cases that fail in a local Jenkins job execution but pass in a reference Jenkins job execution")
parser.add_argument("--local", required=True, nargs='+', help='Link to local jenkins jobs')
parser.add_argument("--reference", required=True, help='Link to reference jenkins job')
parser.add_argument("--both-fail", required=False, action='store_true',
help='With this option enabled, what the script shows are the test cases that fail simultaneously in both local and reference jobs')
parser.add_argument("--color", required=False, action='store_true', help='Show results with beautiful colors')
parser.add_argument("--compare", required=False, type=check_positive,
help='Number of previous executions to the reference one to compare with. Useful to see if there are unstable test cases')
parser.add_argument("--test-suite", required=False, nargs='+', help='Test suite names to filter results')
args = parser.parse_args()
url_local = args.local
url_ref = args.reference
l = [['Test case']]
row = 0
col = 0
tc_show = dict()
local_check_status = ['FAILED', 'REGRESSION']
ref_check_status = local_check_status if args.both_fail else ['PASSED', 'FIXED']
reference = [set()]
tcs_reference = [dict()]
local = [set() for x in range(len(url_local))]
tcs_local = [dict() for x in range(len(url_local))]
i = 0
def local_fetcher(i, url, lock):
# 'url' is passed explicitly so each thread reads its own job URL
# instead of closing over the loop variable (late-binding bug).
data_local = read_url(url)
suites_local = get_suites(data_local)
with lock:
tcs_local[i] = get_all_tcs(suites_local)
local[i] = set(tcs_local[i])
l[0].append('Local status')
def ref_fetcher():
data_ref = read_url(url_ref)
suites_ref = get_suites(data_ref)
tcs_reference[0] = get_all_tcs(suites_ref)
reference[0] = set(tcs_reference[0])
lock = threading.Lock()
threads = list()
print 'Reading data from URLs ... ',
t = threading.Thread(target=ref_fetcher)
threads.append(t)
t.start()
for url in url_local:
t = threading.Thread(target=local_fetcher, args=(i, url, lock))
threads.append(t)
t.start()
col += 1
i += 1
for t in threads:
t.join()
print color_ok('DONE')
tcs_reference = tcs_reference[0]
reference = reference[0]
i = 0
print 'Comparing results ... ',
for url in url_local:
for key in local[i]:
try:
if (tcs_local[i][key] in local_check_status) and (tcs_reference[key] in ref_check_status):
tc_show, row, l = append_tc(key, tc_show, row, l, url_local, tcs_local, args.color, local_check_status)
except KeyError:
if tcs_local[i][key] in local_check_status:
tc_show, row, l = append_tc(key, tc_show, row, l, url_local, tcs_local, args.color, local_check_status)
i += 1
for key in tc_show.keys():
try:
if args.color:
if tcs_reference[key] in local_check_status:
l[tc_show[key]][-1] = color_error(tcs_reference[key])
else:
l[tc_show[key]][-1] = color_ok(tcs_reference[key])
else:
l[tc_show[key]][-1] = tcs_reference[key]
except KeyError:
l[tc_show[key]][-1] = ''
l[0].append('Reference status')
print color_ok('DONE')
def comparer(i, lock):
url_N = get_previous_N_url(url_ref, i)
try:
data_ref = read_url(url_N)
except urllib2.HTTPError:
return
suites_ref = get_suites(data_ref)
tcs_reference_N = get_all_tcs(suites_ref)
for key in tc_show.keys():
try:
with lock:
if tcs_reference_N[key] in local_check_status:
tcs_comparison[key] += 1
tcs_total[key] += 1
except KeyError:
pass
if args.compare:
print 'Comparing with previous regressions ... ',
tcs_comparison = dict.fromkeys(tc_show.keys(), 0)
tcs_total = dict.fromkeys(tc_show.keys(), 0)
lock = threading.Lock()
threads = list()
for i in range(1, args.compare+1):
t = threading.Thread(target=comparer, args=(i, lock))
threads.append(t)
t.start()
for t in threads:
t.join()
l[0].append('#fail (total) in previous reference executions')
for key in tc_show.keys():
l[tc_show[key]].append(str(tcs_comparison[key]) + ' (' + str(tcs_total[key]) + ')')
print color_ok('DONE')
print 'Showing results ... '
header = l[0]
l.pop(0)
if args.test_suite:
l2 = [[]]
for suite in args.test_suite:
l2 += filter(lambda x: x[0].startswith(suite), l[1:])
else:
l2 = l
l2.sort()
l2.insert(0, header)
try:
cols = get_terminal_width()
print Table(l2, [3*cols/7, cols/7, cols/7, 2*cols/7], spacing=True, count=True)
except:
print Table(l2, [0, 30, 40, 60], spacing=True, count=True)
def read_url(url):
'''
Read data from the URL's python API endpoint and return it as a dictionary
:param url: URL to read data from
:type url: str
:return: data read from the URL
:rtype: dict
'''
if not url.endswith('/testReport/api/python') and not url.endswith('/testReport/api/python/'):
url += '/testReport/api/python'
response = urllib2.urlopen(url)
# The Jenkins '/api/python' endpoint returns a Python literal; parse it
# with ast.literal_eval() instead of eval() so no code can be executed.
return ast.literal_eval(response.read())
def get_suites(data):
'''
Get all test suites from input dictionary data
:param data: data to be parsed
:type data: dict
:return: test suites information
:rtype: dict
'''
return data.get('suites', [])
def get_all_tcs(suites):
'''
Parse input test suites to get all test cases as a dictionary with key:value
test_suite_name.test_case_name : status, where status will be typically 'FAILED', 'REGRESSION', 'PASSED', 'FIXED'
:param suites: test suites to be parsed
:type suites: dict
:return: test cases
:rtype: dict
'''
tcs = dict()
for suite in suites:
tcs_per_suite = suite.get('cases')
for tc in tcs_per_suite:
tcs.update({tc['className'] + '.' + tc['name']: tc['status']})
return tcs
def check_positive(value):
'''
To be used as argument parser to check if input parameter is a positive integer
:param value: input parameter
:type value: str
:return: int(value)
:rtype: int
'''
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" %value)
return ivalue
def get_previous_N_url(url, N):
'''
Given a URL that ends with a build number, typically a Jenkins job
execution, compute the URL corresponding to the execution N builds earlier
:param url: URL from where to calculate the previous N one
:type url: str
:param N: number of executions to go back
:type N: str
:return: corresponding URL of the previous N build
:rtype: str
'''
if url.endswith('/testReport/api/python') or url.endswith('/testReport/api/python/'):
url = re.sub(r'/testReport/api/python', '', url)
if url.endswith('/'):
url = url[0:len(url)-1]
l = url.rsplit('/')
j = l[-1]
url = '/'.join(l[0:len(l)-1]) + '/' + str(int(j)-int(N))
return url
def get_terminal_size():
rows, cols = os.popen('stty size', 'r').read().split()
return int(rows), int(cols)
def get_terminal_width():
rows, cols = get_terminal_size()
return cols
if __name__ == "__main__":
sys.stdout = Unbuffered(sys.stdout)
main()
exit(0)
|
processes.py | import logging
import multiprocessing
import os
import random
import signal
import sys
import threading
import traceback
import westpa.work_managers as work_managers
from . core import WorkManager, WMFuture
log = logging.getLogger(__name__)
# Tasks are tuples ('task', task_id, fn, args, kwargs).
# Results are tuples (rtype, task_id, payload) where rtype is 'result' or 'exception' and payload is the return value
# or exception, respectively.
task_shutdown_sentinel = ('shutdown', None, None, (), {})
result_shutdown_sentinel = ('shutdown', None, None)
class ProcessWorkManager(WorkManager):
'''A work manager using the ``multiprocessing`` module.'''
@classmethod
def from_environ(cls, wmenv=None):
if wmenv is None:
wmenv = work_managers.environment.default_env
return cls(wmenv.get_val('n_workers', multiprocessing.cpu_count(), int))
def __init__(self, n_workers = None, shutdown_timeout = 1):
super(ProcessWorkManager,self).__init__()
self.n_workers = n_workers or multiprocessing.cpu_count()
self.workers = None
self.task_queue = multiprocessing.Queue()
self.result_queue = multiprocessing.Queue()
self.receive_thread = None
self.pending = None
self.shutdown_received = False
self.shutdown_timeout = shutdown_timeout or 1
def task_loop(self):
# Close standard input, so we don't get SIGINT from ^C
try:
sys.stdin.close()
except Exception as e:
log.info("can't close stdin: {}".format(e))
# (re)initialize random number generator in this process
random.seed()
while not self.shutdown_received:
message, task_id, fn, args, kwargs = self.task_queue.get()[:5]
if message == 'shutdown':
break
try:
result = fn(*args, **kwargs)
except BaseException as e:
result_tuple = ('exception', task_id, (e, traceback.format_exc()))
else:
result_tuple = ('result', task_id, result)
self.result_queue.put(result_tuple)
log.debug('exiting task_loop')
return
def results_loop(self):
while not self.shutdown_received:
message, task_id, payload = self.result_queue.get()[:3]
if message == 'shutdown':
break
elif message == 'exception':
future = self.pending.pop(task_id)
future._set_exception(*payload)
elif message == 'result':
future = self.pending.pop(task_id)
future._set_result(payload)
else:
raise AssertionError('unknown message {!r}'.format((message, task_id, payload)))
log.debug('exiting results_loop')
def submit(self, fn, args=None, kwargs=None):
ft = WMFuture()
log.debug('dispatching {!r}'.format(fn))
self.pending[ft.task_id] = ft
self.task_queue.put(('task', ft.task_id, fn, args or (), kwargs or {}))
return ft
def startup(self):
from . import environment
if not self.running:
log.debug('starting up work manager {!r}'.format(self))
self.running = True
self.workers = [multiprocessing.Process(target=self.task_loop,
name='worker-{:d}-{:x}'.format(i,id(self))) for i in range(self.n_workers)]
pi_name = '{}_PROCESS_INDEX'.format(environment.WMEnvironment.env_prefix)
for iworker,worker in enumerate(self.workers):
os.environ[pi_name] = str(iworker)
worker.start()
try:
del os.environ[pi_name]
except KeyError:
pass
self.pending = dict()
self.receive_thread = threading.Thread(target=self.results_loop, name='receiver')
self.receive_thread.daemon = True
self.receive_thread.start()
def _empty_queues(self):
while not self.task_queue.empty():
try:
self.task_queue.get(block=False)
except multiprocessing.queues.Empty:
break
while not self.result_queue.empty():
try:
self.result_queue.get(block=False)
except multiprocessing.queues.Empty:
break
def shutdown(self):
if self.running:
log.debug('shutting down {!r}'.format(self))
self._empty_queues()
# Send shutdown signal
for _i in range(self.n_workers):
self.task_queue.put(task_shutdown_sentinel, block=False)
for worker in self.workers:
worker.join(self.shutdown_timeout)
if worker.is_alive():
log.debug('sending SIGINT to worker process {:d}'.format(worker.pid))
os.kill(worker.pid, signal.SIGINT)
worker.join(self.shutdown_timeout)
if worker.is_alive():
log.warning('sending SIGKILL to worker process {:d}'.format(worker.pid))
os.kill(worker.pid, signal.SIGKILL)
worker.join()
log.debug('worker process {:d} terminated with code {:d}'.format(worker.pid, worker.exitcode))
else:
log.debug('worker process {:d} terminated gracefully with code {:d}'.format(worker.pid, worker.exitcode))
self._empty_queues()
self.result_queue.put(result_shutdown_sentinel)
self.running = False
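# Minimal usage sketch (illustrative; assumes WMFuture exposes get_result()
# as defined in westpa.work_managers.core):
#
#   work_manager = ProcessWorkManager(n_workers=2)
#   work_manager.startup()
#   future = work_manager.submit(pow, args=(2, 10))
#   assert future.get_result() == 1024
#   work_manager.shutdown()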
|
test_decorator.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from concurrent.futures import ThreadPoolExecutor, as_completed
import os
import subprocess
import sys
import threading
import unittest
from maro.communication import Proxy, SessionMessage, dist
from utils import get_random_port, proxy_generator
def handler_function(that, proxy, message):
replied_payload = {"counter": message.payload["counter"] + 1}
proxy.reply(message, payload=replied_payload)
sys.exit(0)
def launch_receiver(handler_dict, redis_port):
proxy = proxy_generator("receiver", redis_port)
@dist(proxy, handler_dict)
class Receiver:
def __init__(self):
pass
receiver = Receiver()
receiver.launch()
@unittest.skipUnless(os.environ.get("test_with_redis", False), "require redis")
class TestDecorator(unittest.TestCase):
@classmethod
def setUpClass(cls):
print(f"The dist decorator unit test start!")
# Initial Redis.
redis_port = get_random_port()
cls.redis_process = subprocess.Popen(["redis-server", "--port", str(redis_port), "--daemonize", "yes"])
cls.redis_process.wait()
# Initial receiver.
conditional_event = "sender:*:1"
handler_dict = {conditional_event: handler_function}
decorator_task = threading.Thread(target=launch_receiver, args=(handler_dict, redis_port))
decorator_task.start()
# Initial sender proxy.
with ThreadPoolExecutor() as executor:
sender_task = executor.submit(proxy_generator, "sender", redis_port)
cls.sender_proxy = sender_task.result()
@classmethod
def tearDownClass(cls) -> None:
print(f"The dist decorator unit test finished!")
if hasattr(cls, "redis_process"):
cls.redis_process.kill()
def test_decorator(self):
message = SessionMessage(tag="unittest",
source=TestDecorator.sender_proxy.component_name,
destination=TestDecorator.sender_proxy.peers["receiver"][0],
payload={"counter": 0})
replied_message = TestDecorator.sender_proxy.send(message)
self.assertEqual(message.payload["counter"]+1, replied_message[0].payload["counter"])
if __name__ == "__main__":
unittest.main()
|
test_jobs.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from parameterized import parameterized
import airflow.example_dags
from airflow import AirflowException, models, settings
from airflow import configuration
from airflow.bin import cli
from airflow.exceptions import DagConcurrencyLimitReached, NoAvailablePoolSlot, \
TaskConcurrencyLimitReached
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BackfillJob, BaseJob, LocalTaskJob, SchedulerJob
from airflow.models import DAG, DagBag, DagModel, DagRun, Pool, SlaMiss, \
TaskInstance as TI, errors
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import create_session
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.compat import MagicMock, Mock, PropertyMock, patch
from tests.compat import mock
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
from tests.test_utils.db import clear_db_dags, clear_db_errors, clear_db_pools, \
clear_db_runs, clear_db_sla_miss
from tests.test_utils.decorators import mock_conf_get
configuration.load_test_config()
logger = logging.getLogger(__name__)
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename used for dags that are created in an ad-hoc manner and can be
# removed/created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
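# Sketch of how the constants above are meant to be used (this helper name is
# illustrative, not part of the original suite): both strings contain the words
# "airflow" and "DAG", so Airflow's cheap content heuristic selects both files
# for parsing; the parseable one is a valid Python expression, while the
# unparseable one raises a SyntaxError on import, which is how tests provoke
# import errors.
def _write_temp_dag_file(dags_folder, parseable=True):
    contents = PARSEABLE_DAG_FILE_CONTENTS if parseable else UNPARSEABLE_DAG_FILE_CONTENTS
    path = os.path.join(dags_folder, TEMP_DAG_FILENAME)
    with open(path, 'w') as dag_file:
        dag_file.write(contents)
    return path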
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super().__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEqual(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
def _get_dummy_dag(self, dag_id, pool=None, task_concurrency=None):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
pool=pool,
task_concurrency=task_concurrency,
dag=dag)
dag.clear()
return dag
def _times_called_with(self, method, class_):
count = 0
for args in method.call_args_list:
if isinstance(args[0][0], class_):
count += 1
return count
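    # Hedged mini-example of what _times_called_with counts: calls on a mock
    # whose first positional argument is an instance of the given class.
    def _example_times_called_with(self):
        m = Mock()
        m(DagConcurrencyLimitReached('limit hit'), 'extra')
        m('plain string')
        # calls whose args[0][0] is a DagConcurrencyLimitReached instance: 1
        self.assertEqual(1, self._times_called_with(m, DagConcurrencyLimitReached))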
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
def setUp(self):
clear_db_runs()
clear_db_pools()
self.parser = cli.CLIFactory.get_parser()
def test_unfinished_dag_runs_set_to_failed(self):
dag = self._get_dummy_dag('dummy_dag')
dag_run = dag.create_dagrun(
run_id='test',
state=State.RUNNING,
)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
self.assertEqual(State.FAILED, dag_run.state)
def test_dag_run_with_finished_tasks_set_to_success(self):
dag = self._get_dummy_dag('dummy_dag')
dag_run = dag.create_dagrun(
run_id='test',
state=State.RUNNING,
)
for ti in dag_run.get_task_instances():
ti.set_state(State.SUCCESS)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
self.assertEqual(State.SUCCESS, dag_run.state)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
target_dag.sync_to_db()
scheduler = SchedulerJob()
task_instances_list = Mock()
scheduler._process_task_instances(target_dag, task_instances_list=task_instances_list)
self.assertFalse(task_instances_list.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler._process_task_instances(target_dag, task_instances_list=task_instances_list)
self.assertTrue(task_instances_list.append.called)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
end_date = DEFAULT_DATE + datetime.timedelta(days=1)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=end_date,
executor=executor,
ignore_first_depends_on_past=True
)
job.run()
expected_execution_order = [
("runme_0", DEFAULT_DATE),
("runme_1", DEFAULT_DATE),
("runme_2", DEFAULT_DATE),
("runme_0", end_date),
("runme_1", end_date),
("runme_2", end_date),
("also_run_this", DEFAULT_DATE),
("also_run_this", end_date),
("run_after_loop", DEFAULT_DATE),
("run_after_loop", end_date),
("run_this_last", DEFAULT_DATE),
("run_this_last", end_date),
]
self.maxDiff = None
self.assertListEqual(
[((dag.dag_id, task_id, when, 1), State.SUCCESS)
for (task_id, when) in expected_execution_order],
executor.sorted_tasks
)
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == dag.dag_id
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf(
"sqlite" in configuration.conf.get("core", "sql_alchemy_conn"),
"concurrent access not supported in sqlite",
)
@parameterized.expand(
[
[
"example_branch_operator",
(
"run_this_first",
"branching",
"branch_a",
"branch_b",
"branch_c",
"branch_d",
"follow_branch_a",
"follow_branch_b",
"follow_branch_c",
"follow_branch_d",
"join",
),
],
[
"example_bash_operator",
("runme_0", "runme_1", "runme_2", "also_run_this", "run_after_loop", "run_this_last"),
],
[
"example_skip_dag",
(
"always_true_1",
"always_true_2",
"skip_operator_1",
"skip_operator_2",
"all_success",
"one_success",
"final_1",
"final_2",
),
],
["latest_only", ("latest_only", "task1")],
]
)
def test_backfill_examples(self, dag_id, expected_execution_order):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful: not all dags are suitable
for this. For example, a dag that sleeps forever or has no schedule won't work
here, since you simply can't backfill it.
"""
self.maxDiff = None
dag = self.dagbag.get_dag(dag_id)
logger.info('*** Running example DAG: %s', dag.dag_id)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
ignore_first_depends_on_past=True)
job.run()
self.assertListEqual(
[((dag_id, task_id, DEFAULT_DATE, 1), State.SUCCESS)
for task_id in expected_execution_order],
executor.sorted_tasks
)
def test_backfill_conf(self):
dag = self._get_dummy_dag('test_backfill_conf')
executor = TestExecutor()
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_task_concurrency_limit(self, mock_log):
task_concurrency = 2
dag = self._get_dummy_dag(
'test_backfill_respect_task_concurrency_limit',
task_concurrency=task_concurrency,
)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
task_concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), task_concurrency)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == task_concurrency:
task_concurrency_limit_reached_at_least_once = True
self.assertEqual(8, num_running_task_instances)
self.assertTrue(task_concurrency_limit_reached_at_least_once)
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
self.assertEqual(0, times_pool_limit_reached_in_debug)
self.assertEqual(0, times_dag_concurrency_limit_reached_in_debug)
self.assertGreater(times_task_concurrency_limit_reached_in_debug, 0)
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_dag_concurrency_limit(self, mock_log):
dag = self._get_dummy_dag('test_backfill_respect_dag_concurrency_limit')
dag.concurrency = 2
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), dag.concurrency)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == dag.concurrency:
concurrency_limit_reached_at_least_once = True
self.assertEqual(8, num_running_task_instances)
self.assertTrue(concurrency_limit_reached_at_least_once)
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
self.assertEqual(0, times_pool_limit_reached_in_debug)
self.assertEqual(0, times_task_concurrency_limit_reached_in_debug)
self.assertGreater(times_dag_concurrency_limit_reached_in_debug, 0)
@patch('airflow.jobs.LoggingMixin.log')
@patch('airflow.jobs.conf.getint')
def test_backfill_with_no_pool_limit(self, mock_getint, mock_log):
non_pooled_backfill_task_slot_count = 2
def getint(section, key):
if section.lower() == 'core' and \
'non_pooled_backfill_task_slot_count' == key.lower():
return non_pooled_backfill_task_slot_count
else:
return configuration.conf.getint(section, key)
mock_getint.side_effect = getint
dag = self._get_dummy_dag('test_backfill_with_no_pool_limit')
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
non_pooled_task_slot_count_reached_at_least_once = False
num_running_task_instances = 0
# if no pool is specified, the number of tasks running in
# parallel per backfill should be less than
# non_pooled_backfill_task_slot_count at any point of time.
for running_task_instances in executor.history:
self.assertLessEqual(
len(running_task_instances),
non_pooled_backfill_task_slot_count,
)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == non_pooled_backfill_task_slot_count:
non_pooled_task_slot_count_reached_at_least_once = True
self.assertEqual(8, num_running_task_instances)
self.assertTrue(non_pooled_task_slot_count_reached_at_least_once)
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
self.assertEqual(0, times_dag_concurrency_limit_reached_in_debug)
self.assertEqual(0, times_task_concurrency_limit_reached_in_debug)
self.assertGreater(times_pool_limit_reached_in_debug, 0)
def test_backfill_pool_not_found(self):
dag = self._get_dummy_dag(
dag_id='test_backfill_pool_not_found',
pool='king_pool',
)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
try:
job.run()
except AirflowException:
return
self.fail("expected AirflowException for nonexistent pool")
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_pool_limit(self, mock_log):
session = settings.Session()
slots = 2
pool = Pool(
pool='pool_with_two_slots',
slots=slots,
)
session.add(pool)
session.commit()
dag = self._get_dummy_dag(
dag_id='test_backfill_respect_pool_limit',
pool=pool.pool,
)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
pool_was_full_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), slots)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == slots:
pool_was_full_at_least_once = True
self.assertEqual(8, num_running_task_instances)
self.assertTrue(pool_was_full_at_least_once)
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
self.assertEqual(0, times_task_concurrency_limit_reached_in_debug)
self.assertEqual(0, times_dag_concurrency_limit_reached_in_debug)
self.assertGreater(times_pool_limit_reached_in_debug, 0)
def test_backfill_run_rescheduled(self):
dag = DAG(
dag_id='test_backfill_run_rescheduled',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_run_rescheduled_task-1',
dag=dag,
)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
d0 = DEFAULT_DATE
d1 = d0 + datetime.timedelta(days=1)
d2 = d1 + datetime.timedelta(days=1)
# test executor history keeps a list
history = executor.history
self.maxDiff = None
self.assertListEqual(
# key[0] is the dag id and key[3] is the try_number; we don't care about either here
[sorted([item[-1].key[1:3] for item in batch]) for batch in history],
[
[
('leave1', d0),
('leave1', d1),
('leave1', d2),
('leave2', d0),
('leave2', d1),
('leave2', d2)
],
[('upstream_level_1', d0), ('upstream_level_1', d1), ('upstream_level_1', d2)],
[('upstream_level_2', d0), ('upstream_level_2', d1), ('upstream_level_2', d2)],
[('upstream_level_3', d0), ('upstream_level_3', d1), ('upstream_level_3', d2)],
]
)
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
executor=TestExecutor(),
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
executor=TestExecutor(),
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEqual(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_backfill_depends_on_past_backwards(self):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
kwargs = dict(
start_date=start_date,
end_date=end_date,
)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
ignore_first_depends_on_past=True,
**kwargs)
job.run()
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
self.assertEqual(ti.state, State.SUCCESS)
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
'test_dop_task')
with self.assertRaisesRegexp(AirflowException, expected_msg):
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
run_backwards=True,
**kwargs)
job.run()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
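    # Hedged sketch of the synchronization handshake used above: the worker
    # thread mutates state and notifies under the condition, while the main
    # thread holds the lock, starts the worker, and wait()s before asserting.
    def _example_condition_handshake(self):
        cond = threading.Condition()
        done = []
        def worker():
            with cond:
                done.append(True)
                cond.notify()
        t = threading.Thread(target=worker)
        with cond:
            t.start()
            cond.wait(timeout=5)
        t.join()
        self.assertTrue(done)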
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run since the existing DagRun does not count toward the
# max active limit, because it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# With max_active_runs limited to 1, the backfill has to loop, completing
# the dag runs one at a time; both runs in the range should succeed
success_expected = 2
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor()
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor()
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor()
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor()
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEqual(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEqual(State.NONE, ti1.state)
# Checks that all the Downstream tasks for Parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEqual(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor()
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for reschedule
ti.set_state(State.UP_FOR_RESCHEDULE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
session.close()
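    # Summary sketch, derived purely from the assertions in test_update_counters:
    # where _update_counters moves a formerly-running task instance. The mapping
    # below is illustrative documentation only; the tests do not use it.
    EXPECTED_COUNTER_BUCKET = {
        State.SUCCESS: 'succeeded',
        State.SKIPPED: 'skipped',
        State.FAILED: 'failed',
        State.UP_FOR_RETRY: 'to_run',
        State.UP_FOR_RESCHEDULE: 'to_run',
        State.NONE: 'to_run',
    }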
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
)
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
def test_backfill_run_backwards(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True
)
job.run()
session = settings.Session()
# pass both predicates to filter(): `and` between SQLAlchemy expressions
# silently drops one of the clauses
tis = session.query(TI).filter(
    TI.dag_id == 'test_start_date_scheduling',
    TI.task_id == 'dummy'
).order_by(TI.execution_date).all()
queued_times = [ti.queued_dttm for ti in tis]
self.assertTrue(queued_times == sorted(queued_times, reverse=True))
self.assertTrue(all([ti.state == State.SUCCESS for ti in tis]))
dag.clear()
session.close()
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
clear_db_runs()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = TestExecutor()
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag()
def getboolean(section, key):
if section.lower() == 'core' and key.lower() == 'load_examples':
return False
else:
return configuration.conf.getboolean(section, key)
cls.patcher = mock.patch('airflow.jobs.conf.getboolean')
mock_getboolean = cls.patcher.start()
mock_getboolean.side_effect = getboolean
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
def run_single_scheduler_loop_with_no_dags(self, dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
executor=self.null_exec,
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
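    # Hedged usage sketch for the helper above: run one scheduler loop over a
    # throwaway folder so its side effects (e.g. recorded import errors) can
    # be inspected afterwards. The folder handling here is illustrative.
    def _example_single_loop_over_empty_folder(self):
        dags_folder = mkdtemp()
        try:
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
        finally:
            shutil.rmtree(dags_folder)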
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1,
executor=TestExecutor(do_update=False))
scheduler.run()
shutil.rmtree(empty_dir)
scheduler.executor.terminate()
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor(do_update=False)
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
@mock_conf_get('core', 'non_pooled_task_slot_count', 1)
def test_find_executable_task_instances_in_non_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_in_non_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
t1 = DummyOperator(dag=dag, task_id='dummy1')
t2 = DummyOperator(dag=dag, task_id='dummy2')
dagbag = self._make_simple_dag_bag([dag])
executor = TestExecutor(do_update=True)
scheduler = SchedulerJob(executor=executor)
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
session = settings.Session()
ti1 = TI(task=t1, execution_date=dr1.execution_date)
ti2 = TI(task=t2, execution_date=dr2.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
# Two tasks w/o pool up for execution and our non_pool size is 1
res = scheduler._find_executable_task_instances(
dagbag,
states=(State.SCHEDULED,),
session=session)
self.assertEqual(1, len(res))
ti2.state = State.RUNNING
ti2.pool = Pool.default_pool_name
session.merge(ti2)
session.commit()
# One task w/o pool up for execution and one task running
res = scheduler._find_executable_task_instances(
dagbag,
states=(State.SCHEDULED,),
session=session)
self.assertEqual(0, len(res))
session.close()
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
executor = TestExecutor(do_update=True)
scheduler = SchedulerJob(executor=executor)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
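# task1 has task_concurrency=2 and only one of its instances (ti1_1) is
# RUNNING, so ti1_2 should still be executable.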
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_1.state = State.QUEUED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SUCCESS
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
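# Simulate ti1_1 already sitting in the executor queue; a QUEUED task
# that the executor still holds should not be picked up again.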
executor.queued_tasks[ti1_1.key] = ti1_1
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED, State.QUEUED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
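# Only task instances whose current state is in the acceptable list
# ([NONE, SCHEDULED]) should be moved to QUEUED; ti2 is QUEUED already
# and is therefore skipped.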
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
assert mock_queue_command.called
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
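# The SimpleDagBag is empty, so nothing is executable even though a
# SCHEDULED task instance exists in the database.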
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# It is important that len(tasks) is less than concurrency: previously,
# scheduler._execute_task_instances only checked the number of running
# tasks once, so with a concurrency of 3 it could execute arbitrarily
# many tasks in the second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# It is important that len(tasks) is less than concurrency: previously,
# scheduler._execute_task_instances only checked the number of running
# tasks once, so with a concurrency of 3 it could execute arbitrarily
# many tasks in the second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
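# max_tis_per_query=3 forces the scheduler to page through the eight
# schedulable task instances in several batches; all of them should
# still end up QUEUED.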
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# merge is needed here so the FAILED dagrun state is actually persisted
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor(do_update=False)
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
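# A task instance key has the shape (dag_id, task_id, execution_date,
# try_number), as constructed below.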
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
executor = TestExecutor(do_update=False)
scheduler.executor = executor
scheduler.processor_agent = processor
scheduler._execute_helper()
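# The scheduler-created run's task should have been reset to NONE as an
# orphan, while the backfill run's task is left untouched.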
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
self, initial_task_state, expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. The done flag is set to True to exit the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = TestExecutor(do_update=False)
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
for tid, state in expected_task_states.items():
if state != State.FAILED:
continue
self.null_exec.mock_task_fail(dag_id, tid, ex_date)
try:
dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = scheduler.create_dag_run(dag)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
with self.assertRaises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_root_after_dagrun_unfinished(self):
"""
DagRuns with one successful and one future root task -> SUCCESS
Note: the DagRun state may still be RUNNING during CI.
"""
dag_id = 'test_dagrun_states_root_future'
dag = self.dagbag.get_dag(dag_id)
scheduler = SchedulerJob(
dag_id,
num_runs=1,
executor=self.null_exec,
subdir=dag.fileloc)
scheduler.run()
first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
self.assertEqual(ti_ids, [('current', State.SUCCESS)])
self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > datetime.datetime.utcnow())
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
# Previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there.
# That behavior still exists, but now it will only do so if it is
# after the start date.
bf_exec = TestExecutor()
backfill = BackfillJob(
executor=bf_exec,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
self.assertListEqual(
[
((dag.dag_id, 'dummy', DEFAULT_DATE, 1), State.SUCCESS),
],
bf_exec.sorted_tasks
)
session.commit()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_scheduler_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.put.assert_not_called()
def test_scheduler_do_not_schedule_without_tasks(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_without_tasks',
start_date=DEFAULT_DATE)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear(session=session)
dag.start_date = None
dr = scheduler.create_dag_run(dag, session=session)
self.assertIsNone(dr)
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
task_instances_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout is not reached.
Test that a dagrun will be scheduled if max_active_runs has
been reached but dagrun_timeout is also reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
task_instances_list = Mock()
# and schedule them in, so we can check how many
# tasks are put on the task_instances_list (should be one, not 3)
scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
task_instances_list.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob(executor=self.null_exec)
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
task_instances_list = []
scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
self.assertEqual(len(task_instances_list), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
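# The pool has a single slot, so only one of the two SCHEDULED task
# instances can be queued.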
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED, State.UP_FOR_RETRY),
session=session)
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
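# schedule_interval "4 5 * * *" fires at 05:04; the 10:10 start date
# falls after that day's slot, so the first execution_date is aligned
# forward to 2016-01-02 05:04.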
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor(do_update=False)
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta()})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_scheduler_sla_miss_callback_invalid_sla(self):
"""
Test that the scheduler does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': None})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_sent_notification(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks that the scheduler does not put a task in limbo when a task is
retried but is still present in the executor.
"""
executor = TestExecutor(do_update=False)
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, simple_ti) = ti_tuple
ti = simple_ti.construct_task_instance()
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
# self.assertEqual(ti.state, State.SCHEDULED) was removed here because
# the scheduler moves the state from SCHEDULED to QUEUED.
# Now that the executor has been cleared, the task should be allowed to
# re-queue; tasks left in executor.queued_tasks after executor.heartbeat()
# are set back to the SCHEDULED state.
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.queued_tasks.clear()
executor.do_update = True
do_schedule()
ti.refresh_from_db()
self.assertIn(ti.state, [State.RUNNING, State.SUCCESS])
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER, "..", "dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
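# ---------------------------------------------------------------------------
# A minimal sketch (added for illustration, not part of the original tests)
# of the pattern exercised above: a SCHEDULED task inside a RUNNING,
# non-backfill dagrun counts as orphaned and is returned (and reset) by
# reset_state_for_orphaned_tasks(); tasks in finished or backfill dagruns are
# left alone. Uses the same module-level imports as the tests above.
def _example_reset_orphans():
    dag = DAG(dag_id='example_reset_orphans', start_date=DEFAULT_DATE,
              schedule_interval='@daily')
    DummyOperator(task_id='example_task', dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr = scheduler.create_dag_run(dag, session=session)
    ti = dr.get_task_instances(session=session)[0]
    dr.state = State.RUNNING
    ti.state = State.SCHEDULED
    session.merge(dr)
    session.merge(ti)
    session.commit()
    # returns the orphaned task instance; its state is reset to None
    return scheduler.reset_state_for_orphaned_tasks(session=session)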
|
xAPIConnector.py | import json
import socket
import logging
import time
import ssl
from threading import Thread
import yaml
# set to True in debug environments only
DEBUG = True
# default connection properties
DEFAULT_XAPI_ADDRESS = 'xapi.xtb.com'
#DEFAULT_XAPI_PORT = 5124 # Demo
#DEFAULT_XAPI_STREAMING_PORT = 5125 # Demo
DEFAULT_XAPI_PORT = 5112 # Real
DEFAULT_XAPI_STREAMING_PORT = 5113 # Real
# wrapper name and version
WRAPPER_NAME = 'python'
WRAPPER_VERSION = '2.5.0'
# API inter-command timeout (in ms)
API_SEND_TIMEOUT = 100
# max connection tries
API_MAX_CONN_TRIES = 3
# logger properties
logger = logging.getLogger("jsonSocket")
FORMAT = '[%(asctime)-15s][%(funcName)s:%(lineno)d] %(message)s'
logging.basicConfig(format=FORMAT)
if DEBUG:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.CRITICAL)
class TransactionSide(object):
BUY = 0
SELL = 1
BUY_LIMIT = 2
SELL_LIMIT = 3
BUY_STOP = 4
SELL_STOP = 5
class TransactionType(object):
ORDER_OPEN = 0
ORDER_CLOSE = 2
ORDER_MODIFY = 3
ORDER_DELETE = 4
class JsonSocket(object):
def __init__(self, address, port, encrypt = False):
self._ssl = encrypt
        if not self._ssl:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket = ssl.wrap_socket(sock)
self.conn = self.socket
self._timeout = None
self._address = address
self._port = port
self._decoder = json.JSONDecoder()
self._receivedData = ''
def connect(self):
for i in range(API_MAX_CONN_TRIES):
try:
self.socket.connect( (self.address, self.port) )
except socket.error as msg:
logger.error("SockThread Error: %s" % msg)
                time.sleep(0.25)
continue
logger.info("Socket connected")
return True
return False
def _sendObj(self, obj):
msg = json.dumps(obj)
self._waitingSend(msg)
def _waitingSend(self, msg):
if self.socket:
sent = 0
msg = msg.encode('utf-8')
while sent < len(msg):
sent += self.conn.send(msg[sent:])
logger.info('Sent: ' + str(msg))
            time.sleep(API_SEND_TIMEOUT / 1000.0)  # float division so the 100 ms pause is not truncated to 0 under Python 2 integer division
def _read(self, bytesSize=4096):
if not self.socket:
raise RuntimeError("socket connection broken")
while True:
char = self.conn.recv(bytesSize).decode()
self._receivedData += char
try:
(resp, size) = self._decoder.raw_decode(self._receivedData)
if size == len(self._receivedData):
self._receivedData = ''
break
elif size < len(self._receivedData):
self._receivedData = self._receivedData[size:].strip()
break
            except ValueError:
continue
logger.info('Received: ' + str(resp))
return resp
def _readObj(self):
msg = self._read()
return msg
def close(self):
logger.debug("Closing socket")
self._closeSocket()
if self.socket is not self.conn:
logger.debug("Closing connection socket")
self._closeConnection()
def _closeSocket(self):
self.socket.close()
def _closeConnection(self):
self.conn.close()
def _get_timeout(self):
return self._timeout
def _set_timeout(self, timeout):
self._timeout = timeout
self.socket.settimeout(timeout)
def _get_address(self):
return self._address
def _set_address(self, address):
pass
def _get_port(self):
return self._port
def _set_port(self, port):
pass
def _get_encrypt(self):
return self._ssl
def _set_encrypt(self, encrypt):
pass
timeout = property(_get_timeout, _set_timeout, doc='Get/set the socket timeout')
address = property(_get_address, _set_address, doc='read only property socket address')
port = property(_get_port, _set_port, doc='read only property socket port')
    encrypt = property(_get_encrypt, _set_encrypt, doc='read only property socket encryption flag')
class APIClient(JsonSocket):
def __init__(self, address=DEFAULT_XAPI_ADDRESS, port=DEFAULT_XAPI_PORT, encrypt=True):
super(APIClient, self).__init__(address, port, encrypt)
if(not self.connect()):
raise Exception("Cannot connect to " + address + ":" + str(port) + " after " + str(API_MAX_CONN_TRIES) + " retries")
def execute(self, dictionary):
self._sendObj(dictionary)
return self._readObj()
def disconnect(self):
self.close()
def commandExecute(self,commandName, arguments=None):
return self.execute(baseCommand(commandName, arguments))
class APIStreamClient(JsonSocket):
    def __init__(self, address=DEFAULT_XAPI_ADDRESS, port=DEFAULT_XAPI_STREAMING_PORT, encrypt=True, ssId=None,
tickFun=None, tradeFun=None, balanceFun=None, tradeStatusFun=None, profitFun=None, newsFun=None):
super(APIStreamClient, self).__init__(address, port, encrypt)
self._ssId = ssId
self._tickFun = tickFun
self._tradeFun = tradeFun
self._balanceFun = balanceFun
self._tradeStatusFun = tradeStatusFun
self._profitFun = profitFun
self._newsFun = newsFun
if(not self.connect()):
raise Exception("Cannot connect to streaming on " + address + ":" + str(port) + " after " + str(API_MAX_CONN_TRIES) + " retries")
self._running = True
self._t = Thread(target=self._readStream, args=())
        self._t.daemon = True
self._t.start()
def _readStream(self):
while (self._running):
msg = self._readObj()
logger.info("Stream received: " + str(msg))
if (msg["command"]=='tickPrices'):
self._tickFun(msg)
elif (msg["command"]=='trade'):
self._tradeFun(msg)
elif (msg["command"]=="balance"):
self._balanceFun(msg)
elif (msg["command"]=="tradeStatus"):
self._tradeStatusFun(msg)
elif (msg["command"]=="profit"):
self._profitFun(msg)
elif (msg["command"]=="news"):
self._newsFun(msg)
def disconnect(self):
self._running = False
self._t.join()
self.close()
def execute(self, dictionary):
self._sendObj(dictionary)
def subscribePrice(self, symbol):
self.execute(dict(command='getTickPrices', symbol=symbol, streamSessionId=self._ssId))
def subscribePrices(self, symbols):
for symbolX in symbols:
self.subscribePrice(symbolX)
def subscribeTrades(self):
self.execute(dict(command='getTrades', streamSessionId=self._ssId))
def subscribeBalance(self):
self.execute(dict(command='getBalance', streamSessionId=self._ssId))
def subscribeTradeStatus(self):
self.execute(dict(command='getTradeStatus', streamSessionId=self._ssId))
def subscribeProfits(self):
self.execute(dict(command='getProfits', streamSessionId=self._ssId))
def subscribeNews(self):
self.execute(dict(command='getNews', streamSessionId=self._ssId))
def unsubscribePrice(self, symbol):
self.execute(dict(command='stopTickPrices', symbol=symbol, streamSessionId=self._ssId))
def unsubscribePrices(self, symbols):
for symbolX in symbols:
self.unsubscribePrice(symbolX)
def unsubscribeTrades(self):
self.execute(dict(command='stopTrades', streamSessionId=self._ssId))
def unsubscribeBalance(self):
self.execute(dict(command='stopBalance', streamSessionId=self._ssId))
def unsubscribeTradeStatus(self):
self.execute(dict(command='stopTradeStatus', streamSessionId=self._ssId))
def unsubscribeProfits(self):
self.execute(dict(command='stopProfits', streamSessionId=self._ssId))
def unsubscribeNews(self):
self.execute(dict(command='stopNews', streamSessionId=self._ssId))
# Command templates
def baseCommand(commandName, arguments=None):
    if arguments is None:
arguments = dict()
return dict([('command', commandName), ('arguments', arguments)])
def loginCommand(userId, password, appName=''):
return baseCommand('login', dict(userId=userId, password=password, appName=appName))
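# ---------------------------------------------------------------------------
# A minimal usage sketch (added for illustration, not part of the original
# module). user_id/password are placeholder credentials, 'EURUSD' a
# placeholder symbol, and 'getAllSymbols' plus the login reply fields used
# below (status, errorCode, streamSessionId) are assumed xAPI conventions.
def example_session(user_id, password):
    client = APIClient()
    login_response = client.execute(loginCommand(userId=user_id, password=password))
    if not login_response['status']:
        print('Login failed. Error code: {0}'.format(login_response['errorCode']))
        return
    ssid = login_response['streamSessionId']
    # one-off request/response command over the main connection
    print(client.commandExecute('getAllSymbols'))
    # streaming connection reuses the session id; the tick callback prints quotes
    sclient = APIStreamClient(ssId=ssid, tickFun=lambda msg: print(msg))
    sclient.subscribePrice('EURUSD')
    time.sleep(5)
    sclient.unsubscribePrice('EURUSD')
    sclient.disconnect()
    client.disconnect()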
|
platform_utils.py | #
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import platform
import select
import shutil
import stat
from pyversion import is_python3
if is_python3():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
def isWindows():
""" Returns True when running with the native port of Python for Windows,
False when running on any other platform (including the Cygwin port of
Python).
"""
# Note: The cygwin port of Python returns "CYGWIN_NT_xxx"
return platform.system() == "Windows"
class FileDescriptorStreams(object):
""" Platform agnostic abstraction enabling non-blocking I/O over a
collection of file descriptors. This abstraction is required because
  fcntl(os.O_NONBLOCK) is not supported on Windows.
"""
@classmethod
def create(cls):
""" Factory method: instantiates the concrete class according to the
current platform.
"""
if isWindows():
return _FileDescriptorStreamsThreads()
else:
return _FileDescriptorStreamsNonBlocking()
def __init__(self):
self.streams = []
def add(self, fd, dest, std_name):
""" Wraps an existing file descriptor as a stream.
"""
self.streams.append(self._create_stream(fd, dest, std_name))
def remove(self, stream):
""" Removes a stream, when done with it.
"""
self.streams.remove(stream)
@property
def is_done(self):
""" Returns True when all streams have been processed.
"""
return len(self.streams) == 0
def select(self):
""" Returns the set of streams that have data available to read.
The returned streams each expose a read() and a close() method.
When done with a stream, call the remove(stream) method.
"""
raise NotImplementedError
  def _create_stream(self, fd, dest, std_name):
""" Creates a new stream wrapping an existing file descriptor.
"""
raise NotImplementedError
class _FileDescriptorStreamsNonBlocking(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that support
non blocking I/O.
"""
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.set_non_blocking()
def set_non_blocking(self):
import fcntl
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self):
return self.fd.read(4096)
def close(self):
self.fd.close()
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name)
def select(self):
ready_streams, _, _ = select.select(self.streams, [], [])
return ready_streams
class _FileDescriptorStreamsThreads(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that don't support
non blocking I/O. This implementation requires creating threads issuing
blocking read operations on file descriptors.
"""
def __init__(self):
super(_FileDescriptorStreamsThreads, self).__init__()
    # The queue is shared across all threads so we can simulate the
# behavior of the select() function
self.queue = Queue(10) # Limit incoming data from streams
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name, self.queue)
def select(self):
    # Return only one stream at a time, as it is the most straightforward
# thing to do and it is compatible with the select() function.
item = self.queue.get()
stream = item.stream
stream.data = item.data
return [stream]
class QueueItem(object):
""" Item put in the shared queue """
def __init__(self, stream, data):
self.stream = stream
self.data = data
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name, queue):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.queue = queue
self.data = None
self.thread = Thread(target=self.read_to_queue)
self.thread.daemon = True
self.thread.start()
def close(self):
self.fd.close()
def read(self):
data = self.data
self.data = None
return data
def read_to_queue(self):
""" The thread function: reads everything from the file descriptor into
the shared queue and terminates when reaching EOF.
"""
for line in iter(self.fd.readline, b''):
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, line))
self.fd.close()
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, None))
def symlink(source, link_name):
"""Creates a symbolic link pointing to source named link_name.
Note: On Windows, source must exist on disk, as the implementation needs
to know whether to create a "File" or a "Directory" symbolic link.
"""
if isWindows():
import platform_utils_win32
source = _validate_winpath(source)
link_name = _validate_winpath(link_name)
target = os.path.join(os.path.dirname(link_name), source)
if os.path.isdir(target):
platform_utils_win32.create_dirsymlink(source, link_name)
else:
platform_utils_win32.create_filesymlink(source, link_name)
else:
return os.symlink(source, link_name)
def _validate_winpath(path):
path = os.path.normpath(path)
if _winpath_is_valid(path):
return path
raise ValueError("Path \"%s\" must be a relative path or an absolute "
"path starting with a drive letter".format(path))
def _winpath_is_valid(path):
"""Windows only: returns True if path is relative (e.g. ".\\foo") or is
absolute including a drive letter (e.g. "c:\\foo"). Returns False if path
is ambiguous (e.g. "x:foo" or "\\foo").
"""
assert isWindows()
path = os.path.normpath(path)
drive, tail = os.path.splitdrive(path)
if tail:
if not drive:
return tail[0] != os.sep # "\\foo" is invalid
else:
return tail[0] == os.sep # "x:foo" is invalid
else:
return not drive # "x:" is invalid
def rmtree(path):
if isWindows():
shutil.rmtree(path, onerror=handle_rmtree_error)
else:
shutil.rmtree(path)
def handle_rmtree_error(function, path, excinfo):
# Allow deleting read-only files
os.chmod(path, stat.S_IWRITE)
function(path)
def rename(src, dst):
if isWindows():
# On Windows, rename fails if destination exists, see
# https://docs.python.org/2/library/os.html#os.rename
try:
os.rename(src, dst)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(dst)
os.rename(src, dst)
else:
raise
else:
os.rename(src, dst)
def remove(path):
"""Remove (delete) the file path. This is a replacement for os.remove, but
allows deleting read-only files on Windows.
"""
if isWindows():
try:
os.remove(path)
except OSError as e:
if e.errno == errno.EACCES:
os.chmod(path, stat.S_IWRITE)
os.remove(path)
else:
raise
else:
os.remove(path)
def islink(path):
"""Test whether a path is a symbolic link.
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.islink(path)
else:
return os.path.islink(path)
def readlink(path):
"""Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname;
if it is relative, it may be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.readlink(path)
else:
return os.readlink(path)
def realpath(path):
"""Return the canonical path of the specified filename, eliminating
any symbolic links encountered in the path.
Availability: Windows, Unix.
"""
if isWindows():
current_path = os.path.abspath(path)
path_tail = []
for c in range(0, 100): # Avoid cycles
if islink(current_path):
target = readlink(current_path)
current_path = os.path.join(os.path.dirname(current_path), target)
else:
basename = os.path.basename(current_path)
if basename == '':
path_tail.append(current_path)
break
path_tail.append(basename)
current_path = os.path.dirname(current_path)
path_tail.reverse()
result = os.path.normpath(os.path.join(*path_tail))
return result
else:
return os.path.realpath(path)
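# ---------------------------------------------------------------------------
# A minimal usage sketch (added for illustration, not part of the original
# module): pump a child process's stdout/stderr through the platform
# appropriate FileDescriptorStreams implementation, e.g.
# _example_pump(['echo', 'hello']).
def _example_pump(cmd):
  import subprocess
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  out = {'stdout': [], 'stderr': []}
  streams = FileDescriptorStreams.create()
  streams.add(p.stdout, dest=out['stdout'], std_name='stdout')
  streams.add(p.stderr, dest=out['stderr'], std_name='stderr')
  while not streams.is_done:
    for stream in streams.select():
      data = stream.read()
      if not data:
        # EOF: b'' from the non-blocking reader, None from the threaded one
        streams.remove(stream)
        stream.close()
      else:
        stream.dest.append(data)
  p.wait()
  return out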
|
image_sectioning.py | import errno
import os
import re
import cv2
import math
import numpy as np
import multiprocessing as mp
from PIL import Image, ImageDraw, ImageFont, ExifTags
path_regex = re.compile('.+?/(.*)$')
def PIL_to_cv2(img_PIL):
return cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
def write_image(name, data, imgPIL, params):
    # visualize predictions on images
m = params['re_fbase'].search(name)
overlay_pred = Image.new('RGBA',imgPIL.size, (0,0,0,0))
draw = ImageDraw.Draw(overlay_pred)
n_preds = len(data['scores'])
for i in range(n_preds):
draw.rectangle(data['boxes'][i],outline = (0,0,255,127), width=4)
imgPIL = Image.alpha_composite(imgPIL, overlay_pred).convert("RGB")
dir = os.path.dirname(name)
if not os.path.isdir(dir):
os.makedirs(dir)
imgPIL.save( m.group(1) + "_preds.jpg","JPEG")
def extract_exif_data(img):
img_exif = []
try:
img_exif = img._getexif()
    except Exception:
if hasattr(img, 'filename'):
print('{} has no exif data'.format(img.filename))
else:
print('image has no exif data')
return img_exif
def text_exif_labels(exif):
labeled = {}
for (key, val) in exif.items():
labeled[ExifTags.TAGS.get(key)] = val
return labeled
def properly_orient_image(image):
img_exif = extract_exif_data(image)
if(img_exif):
img_exif = text_exif_labels(img_exif)
else:
return image
orient_to_angle = {
1: 0,
3: 180,
6: 90,
8: 270
}
if('Orientation' in img_exif):
orient = img_exif['Orientation']
image = image.rotate(orient_to_angle[orient], expand = True)
else:
if hasattr(image, 'filename'):
print('no orientation in exif of {}'.format(image.filename))
else:
print('no orientation in exif of image')
return image
#rotate_image() is from https://stackoverflow.com/questions/43892506/opencv-python-rotate-image-without-cropping-sides/47248339
def rotate_image(mat, angle):
"""
Rotates an image (angle in degrees) and expands image to avoid cropping
"""
    height, width = mat.shape[:2] # image shape has 3 dimensions
image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)
# rotation calculates the cos and sin, taking absolutes of those.
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
    # find the new width and height bounds
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
    # subtract the old image center (bringing the image back to the origin) and add the new image center coordinates
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
# rotate image with the new bounds and translated rotation matrix
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
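# Worked example (added for clarity): for angle=90, abs_cos=0 and abs_sin=1,
# so bound_w equals the original height and bound_h the original width: the
# canvas swaps its sides instead of cropping the rotated corners.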
def _section_single_image(im, section_dim):
sections = [] #image sections
    offsets = [] # x,y offsets of sections
dims = [] #x,y dimensions of sections
n_wide = section_dim[0]
n_high = section_dim[1]
im_height , im_width = im.shape[:2]
x_b = np.linspace(0,im_width , n_wide +1, dtype='int')
y_b = np.linspace(0,im_height, n_high +1, dtype='int')
for i in range(n_high):
for j in range(n_wide):
im_sec = im[y_b[i]:y_b[i+1],x_b[j]:x_b[j+1]]
sections.append(im_sec)
offsets.append([x_b[j], y_b[i]])
w = x_b[j+1] - x_b[j]
h = y_b[i+1] - y_b[i]
dims.append([w, h])
return sections, offsets, dims
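# Worked example (added for clarity): a 200x100 image (width x height) with
# section_dim=(2, 2) uses x boundaries [0, 100, 200] and y boundaries
# [0, 50, 100], yielding four 100x50 sections with offsets
# [0, 0], [100, 0], [0, 50] and [100, 50].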
# _section_images is the core of image splitting - called by the parallel processing pool
def _section_images(sec_data,files, dirpath, params):
# files = [f for f in files if not re.match(r'^\.',f)] #remove mac hidden files which start with dot
for name in files:
fullpath = os.path.join(dirpath,name)
m = path_regex.findall(dirpath)
is_imfile = params['re_fbase'].findall(name)
if(is_imfile):
dirpath_sub = m[0]
new_dirpath = os.path.join(params['outfld'],dirpath_sub)
#print('new_dirpath: ')
if not os.path.isdir(new_dirpath):
try:
                    os.makedirs(new_dirpath) # race condition: another process may have made the directory already
except OSError as e:
if e.errno != errno.EEXIST:
raise
pass
file_base = os.path.splitext(name)[0]
            with Image.open(fullpath) as im: # PIL images load lazily with odd file access, so it is best to manage the context using "with"
im_rot = properly_orient_image(im)
im_rot = PIL_to_cv2(im_rot)
im_sections, offsets, dims = _section_single_image(im_rot, params['section_dim'])
for i in range(len(im_sections)):
outfile = file_base + "_" + str(i) +'.jpg'
outpath = os.path.join(new_dirpath, outfile)
cv2.imwrite(outpath, im_sections[i])
sec_data[os.path.join(new_dirpath,file_base)] = {'fullpath': fullpath,
'fullsize': [im_rot.shape[1], im_rot.shape[0]], #width, height of full image
'offsets': offsets,
'dims': dims}
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
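# Worked example (added for clarity): list(chunks([1, 2, 3, 4, 5], 2))
# returns [[1, 2], [3, 4], [5]].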
def section_images(infolder, params):
n_proc = params['workers']
print('in section_images: n_proc: {}'.format(n_proc))
# pool = mp.Pool(processes = n_proc)
manager = mp.Manager()
sec_data = manager.dict()
    for (dirpath, dirnames, files) in os.walk(infolder, topdown=True):
        # send the image files to n_proc different processes - all results are added to sec_data (manager.dict(), a process-safe dict)
jobs = []
        files = [f for f in files if not re.match(r'^\.', f)] # remove mac hidden files which start with a dot
        if not files:
            continue # skip empty folders: a zero chunk size would make range() raise
        for chunk in chunks(files, math.ceil(len(files) / n_proc)):
#pdb.set_trace()
#pool.apply(_fsec, args = (sec_data,chunk, dirpath, params)) #this didn't work for me - always used a single core
j = mp.Process(target = _section_images, args = (sec_data, chunk, dirpath, params)) #this works - actually uses multiple cores
j.start()
jobs.append(j)
for j in jobs:
j.join()
return sec_data
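# ---------------------------------------------------------------------------
# A minimal usage sketch (added for illustration, not part of the original
# module). The keys below mirror the params read above; 'images/batch1' and
# 'out' are hypothetical paths and 're_fbase' a plausible image-file matcher.
# Note that path_regex expects dirpath to contain a path separator, so the
# image folders should sit at least one level below the current directory.
def _example_section_run():
    params = {
        'workers': 4,  # number of worker processes
        'outfld': 'out',  # root folder for the written sections
        're_fbase': re.compile(r'(.*)\.(jpe?g|png)$', re.IGNORECASE),
        'section_dim': (2, 2),  # split each image 2 wide x 2 high
    }
    sec_data = section_images('images/batch1', params)
    for base, info in sec_data.items():
        print('{}: full size {}, {} sections'.format(
            base, info['fullsize'], len(info['offsets'])))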
|
netcdf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test NetCDF driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2016, Even Rouault <even.rouault at spatialys.com>
# Copyright (c) 2010, Kyle Shannon <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
sys.path.append( '../pymod' )
import gdaltest
import test_cli_utilities
###############################################################################
# Netcdf Functions
###############################################################################
###############################################################################
# Get netcdf version and test for supported files
def netcdf_setup():
gdaltest.netcdf_drv_version = 'unknown'
gdaltest.netcdf_drv_has_nc2 = False
gdaltest.netcdf_drv_has_nc4 = False
gdaltest.netcdf_drv_has_hdf4 = False
    gdaltest.netcdf_drv_silent = False
gdaltest.netcdf_drv = gdal.GetDriverByName( 'NETCDF' )
if gdaltest.netcdf_drv is None:
print('NOTICE: netcdf not supported, skipping checks')
return 'skip'
#get capabilities from driver
metadata = gdaltest.netcdf_drv.GetMetadata()
if metadata is None:
print('NOTICE: netcdf metadata not found, skipping checks')
return 'skip'
#netcdf library version "3.6.3" of Dec 22 2009 06:10:17 $
#netcdf library version 4.1.1 of Mar 4 2011 12:52:19 $
if 'NETCDF_VERSION' in metadata:
v = metadata['NETCDF_VERSION']
        v = v[ 0 : v.find(' ') ].strip('"')
gdaltest.netcdf_drv_version = v
if 'NETCDF_HAS_NC2' in metadata \
and metadata['NETCDF_HAS_NC2'] == 'YES':
gdaltest.netcdf_drv_has_nc2 = True
if 'NETCDF_HAS_NC4' in metadata \
and metadata['NETCDF_HAS_NC4'] == 'YES':
gdaltest.netcdf_drv_has_nc4 = True
if 'NETCDF_HAS_HDF4' in metadata \
and metadata['NETCDF_HAS_HDF4'] == 'YES':
gdaltest.netcdf_drv_has_hdf4 = True
print( 'NOTICE: using netcdf version ' + gdaltest.netcdf_drv_version + \
' has_nc2: '+str(gdaltest.netcdf_drv_has_nc2)+' has_nc4: ' + \
str(gdaltest.netcdf_drv_has_nc4) )
return 'success'
###############################################################################
# test file copy
# helper function needed so we can call Process() on it from netcdf_test_copy_timeout()
def netcdf_test_copy( ifile, band, checksum, ofile, opts=[], driver='NETCDF' ):
test = gdaltest.GDALTest( 'NETCDF', '../'+ifile, band, checksum, options=opts )
return test.testCreateCopy(check_gt=0, check_srs=0, new_filename=ofile, delete_copy = 0, check_minmax = 0)
###############################################################################
#test file copy, optional timeout arg
def netcdf_test_copy_timeout( ifile, band, checksum, ofile, opts=[], driver='NETCDF', timeout=None ):
from multiprocessing import Process
result = 'success'
drv = gdal.GetDriverByName( driver )
if os.path.exists( ofile ):
drv.Delete( ofile )
if timeout is None:
result = netcdf_test_copy( ifile, band, checksum, ofile, opts, driver )
else:
sys.stdout.write('.')
sys.stdout.flush()
proc = Process( target=netcdf_test_copy, args=(ifile, band, checksum, ofile, opts ) )
proc.start()
proc.join( timeout )
# if proc is alive after timeout we must terminate it, and return fail
# valgrind detects memory leaks when this occurs (although it should never happen)
if proc.is_alive():
proc.terminate()
if os.path.exists( ofile ):
drv.Delete( ofile )
print('testCreateCopy() for file %s has reached timeout limit of %d seconds' % (ofile, timeout) )
result = 'fail'
return result
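# Example invocation (added for clarity), mirroring netcdf_test_deflate() below:
#   netcdf_test_copy_timeout( 'data/utm.tif', 1, 50235, 'tmp/utm.nc',
#                             [ 'FORMAT=NC4C' ], 'NETCDF', 60 )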
###############################################################################
#check support for DEFLATE compression, requires HDF5 and zlib
def netcdf_test_deflate( ifile, checksum, zlevel=1, timeout=None ):
try:
from multiprocessing import Process
Process.is_alive
    except (ImportError, AttributeError):
print('from multiprocessing import Process failed')
return 'skip'
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
ofile1 = 'tmp/' + os.path.basename(ifile) + '-1.nc'
ofile1_opts = [ 'FORMAT=NC4C', 'COMPRESS=NONE']
ofile2 = 'tmp/' + os.path.basename(ifile) + '-2.nc'
ofile2_opts = [ 'FORMAT=NC4C', 'COMPRESS=DEFLATE', 'ZLEVEL='+str(zlevel) ]
if not os.path.exists( ifile ):
gdaltest.post_reason( 'ifile %s does not exist' % ifile )
return 'fail'
result1 = netcdf_test_copy_timeout( ifile, 1, checksum, ofile1, ofile1_opts, 'NETCDF', timeout )
result2 = netcdf_test_copy_timeout( ifile, 1, checksum, ofile2, ofile2_opts, 'NETCDF', timeout )
if result1 == 'fail' or result2 == 'fail':
return 'fail'
# make sure compressed file is smaller than uncompressed files
try:
size1 = os.path.getsize( ofile1 )
size2 = os.path.getsize( ofile2 )
    except OSError:
gdaltest.post_reason( 'Error getting file sizes.' )
return 'fail'
if size2 >= size1:
gdaltest.post_reason( 'Compressed file is not smaller than reference, check your netcdf-4, HDF5 and zlib installation' )
return 'fail'
return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_check_vars( ifile, vals_global=None, vals_band=None ):
src_ds = gdal.Open( ifile )
if src_ds is None:
gdaltest.post_reason( 'could not open dataset ' + ifile )
return 'fail'
metadata_global = src_ds.GetMetadata()
if metadata_global is None:
gdaltest.post_reason( 'could not get global metadata from ' + ifile )
return 'fail'
missval = src_ds.GetRasterBand(1).GetNoDataValue()
if missval != 1:
gdaltest.post_reason( 'got invalid nodata value %s for Band' % str(missval) )
return 'fail'
metadata_band = src_ds.GetRasterBand(1).GetMetadata()
if metadata_band is None:
gdaltest.post_reason( 'could not get Band metadata' )
return 'fail'
metadata = metadata_global
vals = vals_global
if vals is None:
vals = dict()
for k, v in vals.items():
        if k not in metadata:
gdaltest.post_reason("missing metadata [%s]" % (str(k)))
return 'fail'
# strip { and } as new driver uses these for array values
mk = metadata[k].lstrip('{ ').rstrip('} ')
if mk != v:
gdaltest.post_reason("invalid value [%s] for metadata [%s]=[%s]" \
% (str(mk),str(k),str(v)))
return 'fail'
metadata = metadata_band
vals = vals_band
if vals is None:
vals = dict()
for k, v in vals.items():
        if k not in metadata:
gdaltest.post_reason("missing metadata [%s]" % (str(k)))
return 'fail'
# strip { and } as new driver uses these for array values
mk = metadata[k].lstrip('{ ').rstrip('} ')
if mk != v:
gdaltest.post_reason("invalid value [%s] for metadata [%s]=[%s]" \
% (str(mk),str(k),str(v)))
return 'fail'
return 'success'
###############################################################################
# Netcdf Tests
###############################################################################
###############################################################################
# Perform simple read test.
def netcdf_1():
#setup netcdf environment
netcdf_setup()
if gdaltest.netcdf_drv is None:
return 'skip'
tst = gdaltest.GDALTest( 'NetCDF', 'NETCDF:"data/bug636.nc":tas', 1, 31621,
filename_absolute = 1 )
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
result = tst.testOpen()
gdal.PopErrorHandler()
return result
###############################################################################
# Verify a simple createcopy operation. We can't do the trivial gdaltest
# operation because the new file will only be accessible via subdatasets.
def netcdf_2():
if gdaltest.netcdf_drv is None:
return 'skip'
src_ds = gdal.Open( 'data/byte.tif' )
gdaltest.netcdf_drv.CreateCopy( 'tmp/netcdf2.nc', src_ds)
tst = gdaltest.GDALTest( 'NetCDF', 'tmp/netcdf2.nc',
1, 4672,
filename_absolute = 1 )
wkt = """PROJCS["NAD27 / UTM zone 11N",
GEOGCS["NAD27",
DATUM["North_American_Datum_1927",
SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
AUTHORITY["EPSG","7008"]],
AUTHORITY["EPSG","6267"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4267"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",-117],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","26711"]]"""
result = tst.testOpen( check_prj = wkt )
if result != 'success':
return result
# Test that in raster-only mode, update isn't supported (not sure what would be missing for that...)
with gdaltest.error_handler():
ds = gdal.Open( 'tmp/netcdf2.nc', gdal.GA_Update )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
gdaltest.clean_tmp()
return 'success'
###############################################################################
def netcdf_3():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/sombrero.grd' )
bnd = ds.GetRasterBand(1)
minmax = bnd.ComputeRasterMinMax()
if abs(minmax[0] - (-0.675758)) > 0.000001 or abs(minmax[1] - 1.0) > 0.000001:
gdaltest.post_reason( 'Wrong min or max.' )
return 'fail'
bnd = None
ds = None
return 'success'
###############################################################################
# In #2582 5dimensional files were causing problems. Verify use ok.
def netcdf_4():
if gdaltest.netcdf_drv is None:
return 'skip'
tst = gdaltest.GDALTest( 'NetCDF',
'NETCDF:data/foo_5dimensional.nc:temperature',
3, 1218, filename_absolute = 1 )
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
#don't test for checksum (see bug #4284)
result = tst.testOpen(skip_checksum = True)
gdal.PopErrorHandler()
return result
###############################################################################
# In #2583 5dimensional files were having problems unrolling the highest
# dimension - check handling now on band 7.
def netcdf_5():
if gdaltest.netcdf_drv is None:
return 'skip'
tst = gdaltest.GDALTest( 'NetCDF',
'NETCDF:data/foo_5dimensional.nc:temperature',
7, 1227, filename_absolute = 1 )
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
#don't test for checksum (see bug #4284)
result = tst.testOpen(skip_checksum = True)
gdal.PopErrorHandler()
return result
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#1 standard parallel.
def netcdf_6():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/cf_lcc1sp.nc' )
prj = ds.GetProjection( )
sr = osr.SpatialReference( )
sr.ImportFromWkt( prj )
lat_origin = sr.GetProjParm( 'latitude_of_origin' )
if lat_origin != 25:
gdaltest.post_reason( 'Latitude of origin does not match expected:\n%f'
% lat_origin )
return 'fail'
ds = None
return 'success'
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#2 standard parallels.
def netcdf_7():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/cf_lcc2sp.nc' )
prj = ds.GetProjection( )
sr = osr.SpatialReference( )
sr.ImportFromWkt( prj )
std_p1 = sr.GetProjParm( 'standard_parallel_1' )
std_p2 = sr.GetProjParm( 'standard_parallel_2' )
if std_p1 != 33.0 or std_p2 != 45.0:
gdaltest.post_reason( 'Standard Parallels do not match expected:\n%f,%f'
% ( std_p1, std_p2 ) )
return 'fail'
ds = None
sr = None
return 'success'
###############################################################################
#check for cf convention read of albers equal area
# Previous version compared entire wkt, which varies slightly among driver versions
# now just look for PROJECTION=Albers_Conic_Equal_Area and some parameters
def netcdf_8():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/cf_aea2sp_invf.nc' )
srs = osr.SpatialReference( )
srs.ImportFromWkt( ds.GetProjection( ) )
proj = srs.GetAttrValue( 'PROJECTION' )
if proj != 'Albers_Conic_Equal_Area':
gdaltest.post_reason( 'Projection does not match expected : ' + proj )
return 'fail'
param = srs.GetProjParm('latitude_of_center')
if param != 37.5:
gdaltest.post_reason( 'Got wrong parameter value (%g)' % param )
return 'fail'
param = srs.GetProjParm('longitude_of_center')
if param != -96:
gdaltest.post_reason( 'Got wrong parameter value (%g)' % param )
return 'fail'
ds = None
return 'success'
###############################################################################
#check to see if projected systems default to wgs84 if no spheroid def
def netcdf_9():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/cf_no_sphere.nc' )
prj = ds.GetProjection( )
sr = osr.SpatialReference( )
sr.ImportFromWkt( prj )
spheroid = sr.GetAttrValue( 'SPHEROID' )
if spheroid != 'WGS 84':
gdaltest.post_reason( 'Incorrect spheroid read from file\n%s'
% ( spheroid ) )
return 'fail'
ds = None
sr = None
return 'success'
###############################################################################
#check if km pixel size makes it through to gt
def netcdf_10():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/cf_no_sphere.nc' )
prj = ds.GetProjection( )
gt = ds.GetGeoTransform( )
gt1 = ( -1897186.0290038721, 5079.3608398440065,
0.0,2674684.0244560046,
0.0,-5079.4721679684635 )
gt2 = ( -1897.186029003872, 5.079360839844003,
0.0, 2674.6840244560044,
0.0,-5.079472167968456 )
if gt != gt1:
sr = osr.SpatialReference()
sr.ImportFromWkt( prj )
        #new driver uses UNIT attribute instead of scaling values
if not (sr.GetAttrValue("PROJCS|UNIT",1)=="1000" and gt == gt2) :
gdaltest.post_reason( 'Incorrect geotransform, got '+str(gt) )
return 'fail'
ds = None
return 'success'
###############################################################################
#check if ll gets caught in km pixel size check
def netcdf_11():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/cf_geog.nc' )
gt = ds.GetGeoTransform( )
if gt != (-0.5, 1.0, 0.0, 10.5, 0.0, -1.0):
gdaltest.post_reason( 'Incorrect geotransform' )
return 'fail'
ds = None
return 'success'
###############################################################################
#check for scale/offset set/get.
def netcdf_12():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/scale_offset.nc' )
    scale = ds.GetRasterBand( 1 ).GetScale()
offset = ds.GetRasterBand( 1 ).GetOffset()
if scale != 0.01 or offset != 1.5:
gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
return 'fail'
ds = None
return 'success'
###############################################################################
#check for scale/offset = None if no scale or offset is available
def netcdf_13():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/no_scale_offset.nc' )
    scale = ds.GetRasterBand( 1 ).GetScale()
offset = ds.GetRasterBand( 1 ).GetOffset()
    if scale is not None or offset is not None:
gdaltest.post_reason( 'Incorrect scale or offset' )
return 'fail'
ds = None
return 'success'
###############################################################################
#check for scale/offset for two variables
def netcdf_14():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'NETCDF:data/two_vars_scale_offset.nc:z' )
    scale = ds.GetRasterBand( 1 ).GetScale()
offset = ds.GetRasterBand( 1 ).GetOffset()
if scale != 0.01 or offset != 1.5:
gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
return 'fail'
ds = None
ds = gdal.Open( 'NETCDF:data/two_vars_scale_offset.nc:q' )
    scale = ds.GetRasterBand( 1 ).GetScale()
offset = ds.GetRasterBand( 1 ).GetOffset()
if scale != 0.1 or offset != 2.5:
gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
return 'fail'
return 'success'
###############################################################################
#check support for netcdf-2 (64 bit)
# This test fails in 1.8.1, because the driver does not support NC2 (bug #3890)
def netcdf_15():
if gdaltest.netcdf_drv is None:
return 'skip'
if gdaltest.netcdf_drv_has_nc2:
ds = gdal.Open( 'data/trmm-nc2.nc' )
if ds is None:
return 'fail'
else:
ds = None
return 'success'
else:
return 'skip'
return 'success'
###############################################################################
#check support for netcdf-4
def netcdf_16():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/trmm-nc4.nc'
if gdaltest.netcdf_drv_has_nc4:
# test with Open()
ds = gdal.Open( ifile )
if ds is None:
gdaltest.post_reason('GDAL did not open file')
return 'fail'
else:
name = ds.GetDriver().GetDescription()
ds = None
#return fail if did not open with the netCDF driver (i.e. HDF5Image)
if name != 'netCDF':
gdaltest.post_reason('netcdf driver did not open file')
return 'fail'
# test with Identify()
name = gdal.IdentifyDriver( ifile ).GetDescription()
if name != 'netCDF':
gdaltest.post_reason('netcdf driver did not identify file')
return 'fail'
else:
return 'skip'
return 'success'
###############################################################################
#check support for netcdf-4 - make sure hdf5 is not read by netcdf driver
def netcdf_17():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/groups.h5'
#skip test if Hdf5 is not enabled
if gdal.GetDriverByName( 'HDF5' ) is None and \
gdal.GetDriverByName( 'HDF5Image' ) is None:
return 'skip'
if gdaltest.netcdf_drv_has_nc4:
#test with Open()
ds = gdal.Open( ifile )
if ds is None:
gdaltest.post_reason('GDAL did not open hdf5 file')
return 'fail'
else:
name = ds.GetDriver().GetDescription()
ds = None
#return fail if opened with the netCDF driver
if name == 'netCDF':
gdaltest.post_reason('netcdf driver opened hdf5 file')
return 'fail'
# test with Identify()
name = gdal.IdentifyDriver( ifile ).GetDescription()
if name == 'netCDF':
gdaltest.post_reason('netcdf driver was identified for hdf5 file')
return 'fail'
else:
return 'skip'
return 'success'
###############################################################################
#check support for netcdf-4 classic (NC4C)
def netcdf_18():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/trmm-nc4c.nc'
if gdaltest.netcdf_drv_has_nc4:
# test with Open()
ds = gdal.Open( ifile )
if ds is None:
return 'fail'
else:
name = ds.GetDriver().GetDescription()
ds = None
#return fail if did not open with the netCDF driver (i.e. HDF5Image)
if name != 'netCDF':
return 'fail'
# test with Identify()
name = gdal.IdentifyDriver( ifile ).GetDescription()
if name != 'netCDF':
return 'fail'
else:
return 'skip'
return 'success'
###############################################################################
#check support for reading with DEFLATE compression, requires NC4
def netcdf_19():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
tst = gdaltest.GDALTest( 'NetCDF', 'data/trmm-nc4z.nc', 1, 50235,
filename_absolute = 1 )
result = tst.testOpen(skip_checksum = True)
return result
###############################################################################
#check support for writing with DEFLATE compression, requires NC4
def netcdf_20():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
#simple test with tiny file
return netcdf_test_deflate( 'data/utm.tif', 50235 )
###############################################################################
#check support for writing large file with DEFLATE compression
#if chunking is not defined properly within the netcdf driver, this test can take 1h
def netcdf_21():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
if not gdaltest.run_slow_tests():
return 'skip'
bigfile = 'tmp/cache/utm-big.tif'
sys.stdout.write('.')
sys.stdout.flush()
#create cache dir if absent
if not os.path.exists( 'tmp/cache' ):
os.mkdir( 'tmp/cache' )
#look for large gtiff in cache
if not os.path.exists( bigfile ):
#create large gtiff
if test_cli_utilities.get_gdalwarp_path() is None:
gdaltest.post_reason('gdalwarp not found')
return 'skip'
warp_cmd = test_cli_utilities.get_gdalwarp_path() +\
' -q -overwrite -r bilinear -ts 7680 7680 -of gtiff ' +\
'data/utm.tif ' + bigfile
try:
(ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
except:
gdaltest.post_reason('gdalwarp execution failed')
return 'fail'
if ( err != '' or ret != '' ):
gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
return 'fail'
# test compression of the file, with a conservative timeout of 60 seconds
return netcdf_test_deflate( bigfile, 26695, 6, 60 )
###############################################################################
#check support for hdf4
def netcdf_22():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_hdf4:
return 'skip'
ifile = 'data/hdifftst2.hdf'
#suppress warning
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open( 'NETCDF:' + ifile )
gdal.PopErrorHandler()
if ds is None:
gdaltest.post_reason('netcdf driver did not open hdf4 file')
return 'fail'
else:
ds = None
return 'success'
###############################################################################
#check support for hdf4 - make sure hdf4 file is not read by netcdf driver
def netcdf_23():
#don't skip if netcdf is not enabled in GDAL
#if gdaltest.netcdf_drv is None:
# return 'skip'
#if not gdaltest.netcdf_drv_has_hdf4:
# return 'skip'
#skip test if Hdf4 is not enabled in GDAL
if gdal.GetDriverByName( 'HDF4' ) is None and \
gdal.GetDriverByName( 'HDF4Image' ) is None:
return 'skip'
ifile = 'data/hdifftst2.hdf'
#test with Open()
ds = gdal.Open( ifile )
if ds is None:
gdaltest.post_reason('GDAL did not open hdf4 file')
return 'fail'
else:
name = ds.GetDriver().GetDescription()
ds = None
#return fail if opened with the netCDF driver
if name == 'netCDF':
gdaltest.post_reason('netcdf driver opened hdf4 file')
return 'fail'
# test with Identify()
name = gdal.IdentifyDriver( ifile ).GetDescription()
if name == 'netCDF':
gdaltest.post_reason('netcdf driver was identified for hdf4 file')
return 'fail'
return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_24():
if gdaltest.netcdf_drv is None:
return 'skip'
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '1'}
vals_band = {'_Unsigned': 'true',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars( 'data/nc_vars.nc', vals_global, vals_band )
###############################################################################
# check support for NC4 reading attributes (single values and array values)
def netcdf_24_nc4():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#test_string': 'testval_string',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '-100',
'NC_GLOBAL#test_ub': '200',
'NC_GLOBAL#test_s': '-16000',
'NC_GLOBAL#test_us': '32000',
'NC_GLOBAL#test_l': '-2000000000',
'NC_GLOBAL#test_ul': '4000000000'}
vals_band = {'test_string_arr': 'test,string,arr',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_ub': '1,200',
'valid_range_s': '0,255',
'valid_range_us': '0,32000',
'valid_range_l': '0,255',
'valid_range_ul': '0,4000000000',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars( 'data/nc4_vars.nc', vals_global, vals_band )
###############################################################################
# check support for writing attributes (single values and array values)
def netcdf_25():
if gdaltest.netcdf_drv is None:
return 'skip'
result = netcdf_test_copy( 'data/nc_vars.nc', 1, None, 'tmp/netcdf_25.nc' )
if result != 'success':
return result
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '1'}
vals_band = {'_Unsigned': 'true',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars( 'tmp/netcdf_25.nc', vals_global, vals_band )
###############################################################################
# check support for NC4 writing attributes (single values and array values)
def netcdf_25_nc4():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
result = netcdf_test_copy( 'data/nc4_vars.nc', 1, None, 'tmp/netcdf_25_nc4.nc', [ 'FORMAT=NC4' ] )
if result != 'success':
return result
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#test_string': 'testval_string',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '-100',
'NC_GLOBAL#test_ub': '200',
'NC_GLOBAL#test_s': '-16000',
'NC_GLOBAL#test_us': '32000',
'NC_GLOBAL#test_l': '-2000000000',
'NC_GLOBAL#test_ul': '4000000000'}
vals_band = {'test_string_arr': 'test,string,arr',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_ub': '1,200',
'valid_range_s': '0,255',
'valid_range_us': '0,32000',
'valid_range_l': '0,255',
'valid_range_ul': '0,4000000000',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars( 'tmp/netcdf_25_nc4.nc', vals_global, vals_band )
###############################################################################
# check support for WRITE_BOTTOMUP file creation option
# use a dummy file with no lon/lat info to force a different checksum
# depending on y-axis order
def netcdf_26():
if gdaltest.netcdf_drv is None:
return 'skip'
#test default config
test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
result = test.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
gdal.PopErrorHandler()
if result != 'success':
print('failed create copy without WRITE_BOTTOMUP')
return result
#test WRITE_BOTTOMUP=NO
test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855,
options=['WRITE_BOTTOMUP=NO'] )
result = test.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
if result != 'success':
print('failed create copy with WRITE_BOTTOMUP=NO')
return result
return 'success'
###############################################################################
# check support for GDAL_NETCDF_BOTTOMUP configuration option
def netcdf_27():
if gdaltest.netcdf_drv is None:
return 'skip'
#test default config
test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
config_bak = gdal.GetConfigOption( 'GDAL_NETCDF_BOTTOMUP' )
gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', None )
result = test.testOpen()
gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', config_bak )
if result != 'success':
print('failed open without GDAL_NETCDF_BOTTOMUP')
return result
#test GDAL_NETCDF_BOTTOMUP=NO
test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855 )
config_bak = gdal.GetConfigOption( 'GDAL_NETCDF_BOTTOMUP' )
gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', 'NO' )
result = test.testOpen()
gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', config_bak )
if result != 'success':
print('failed open with GDAL_NETCDF_BOTTOMUP')
return result
return 'success'
###############################################################################
# check support for writing multi-dimensional files (helper function)
def netcdf_test_4dfile( ofile ):
    # test result file has 8 bands and 0 subdatasets (instead of 0 bands and 8 subdatasets)
ds = gdal.Open( ofile )
if ds is None:
gdaltest.post_reason( 'open of copy failed' )
return 'fail'
md = ds.GetMetadata( 'SUBDATASETS' )
subds_count = 0
    if md is not None:
subds_count = len(md) / 2
if ds.RasterCount != 8 or subds_count != 0:
gdaltest.post_reason( 'copy has %d bands (expected 8) and has %d subdatasets'\
' (expected 0)' % (ds.RasterCount, subds_count ) )
return 'fail'
    ds = None
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except:
print('NOTICE: ncdump not found')
return 'success'
    if err is None or 'netcdf library version' not in err:
print('NOTICE: ncdump not found')
return 'success'
(ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h '+ ofile )
if ret == '' or err != '':
gdaltest.post_reason( 'ncdump failed' )
return 'fail'
# simple dimension tests using ncdump output
err = ""
    if 'int t(time, levelist, lat, lon) ;' not in ret:
        err = err + 'variable (t) has wrong dimensions or is missing\n'
    if 'levelist = 2 ;' not in ret:
        err = err + 'levelist dimension is missing or incorrect\n'
    if 'int levelist(levelist) ;' not in ret:
        err = err + 'levelist variable is missing or incorrect\n'
    if 'time = 4 ;' not in ret:
        err = err + 'time dimension is missing or incorrect\n'
    if 'double time(time) ;' not in ret:
err = err + 'time variable is missing or incorrect\n'
# uncomment this to get full header in output
#if err != '':
# err = err + ret
if err != '':
gdaltest.post_reason( err )
return 'fail'
return 'success'
###############################################################################
# check support for writing multi-dimensional files using CreateCopy()
def netcdf_28():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/netcdf-4d.nc'
ofile = 'tmp/netcdf_28.nc'
# copy file
result = netcdf_test_copy( ifile, 0, None, ofile )
if result != 'success':
return 'fail'
# test file
return netcdf_test_4dfile( ofile )
###############################################################################
# Check support for writing multi-dimensional files using gdalwarp.
# Requires metadata copy support in gdalwarp (see bug #3898).
# First create a vrt file using gdalwarp, then copy file to netcdf.
# The workaround is (currently ??) necessary because dimension rolling code is
# in netCDFDataset::CreateCopy() and necessary dimension metadata
# is not saved to netcdf when using gdalwarp (as the driver does not write
# metadata to netcdf file with SetMetadata() and SetMetadataItem()).
def netcdf_29():
if gdaltest.netcdf_drv is None:
return 'skip'
# create tif file using gdalwarp
if test_cli_utilities.get_gdalwarp_path() is None:
gdaltest.post_reason('gdalwarp not found')
return 'skip'
ifile = 'data/netcdf-4d.nc'
ofile1 = 'tmp/netcdf_29.vrt'
ofile = 'tmp/netcdf_29.nc'
warp_cmd = '%s -q -overwrite -of vrt %s %s' %\
( test_cli_utilities.get_gdalwarp_path(), ifile, ofile1 )
try:
(ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
except:
gdaltest.post_reason('gdalwarp execution failed')
return 'fail'
if ( err != '' or ret != '' ):
gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
return 'fail'
# copy vrt to netcdf, with proper dimension rolling
result = netcdf_test_copy( ofile1, 0, None, ofile )
if result != 'success':
return 'fail'
# test file
result = netcdf_test_4dfile( ofile )
if result == 'fail':
print('test failed - does gdalwarp support metadata copying?')
return result
###############################################################################
# check support for file with nan values (bug #4705)
def netcdf_30():
if gdaltest.netcdf_drv is None:
return 'skip'
tst = gdaltest.GDALTest( 'NetCDF', 'trmm-nan.nc', 1, 62519 )
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
result = tst.testOpen()
gdal.PopErrorHandler()
return result
###############################################################################
#check if 2x2 file has proper geotransform
#1 pixel (in width or height) still unsupported because we can't get the pixel dimensions
def netcdf_31():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open( 'data/trmm-2x2.nc' )
ds.GetProjection( )
gt = ds.GetGeoTransform( )
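    # (GDAL geotransform layout: top-left x, w-e pixel size, row rotation,
    #  top-left y, column rotation, n-s pixel size)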
gt1 = ( -80.0, 0.25, 0.0, -19.5, 0.0, -0.25 )
if gt != gt1:
gdaltest.post_reason( 'Incorrect geotransform, got '+str(gt) )
return 'fail'
ds = None
return 'success'
###############################################################################
# Test NC_UBYTE write/read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_32():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
ifile = 'data/byte.tif'
ofile = 'tmp/netcdf_32.nc'
#gdal.SetConfigOption('CPL_DEBUG', 'ON')
# test basic read/write
result = netcdf_test_copy( ifile, 1, 4672, ofile, [ 'FORMAT=NC4' ] )
if result != 'success':
return 'fail'
result = netcdf_test_copy( ifile, 1, 4672, ofile, [ 'FORMAT=NC4C' ] )
if result != 'success':
return 'fail'
return 'success'
###############################################################################
# Test NC_UBYTE metadata read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_33():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/nc_vars.nc'
ofile = 'tmp/netcdf_33.nc'
result = netcdf_test_copy( ifile, 1, None, ofile, [ 'FORMAT=NC4' ] )
if result != 'success':
return result
return netcdf_check_vars( 'tmp/netcdf_33.nc' )
###############################################################################
# check support for reading a large file with chunking and DEFLATE compression
# if chunking is not supported within the netcdf driver, this test can take a very long time
def netcdf_34():
filename = 'utm-big-chunks.nc'
    # this timeout is more than enough - on my system it takes <1s with the fix, about 25s without
timeout = 5
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
if not gdaltest.run_slow_tests():
return 'skip'
try:
from multiprocessing import Process
except:
print('from multiprocessing import Process failed')
return 'skip'
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/netcdf/'+filename,filename):
return 'skip'
sys.stdout.write('.')
sys.stdout.flush()
tst = gdaltest.GDALTest( 'NetCDF', '../tmp/cache/'+filename, 1, 31621 )
#tst.testOpen()
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
proc = Process( target=tst.testOpen )
proc.start()
proc.join( timeout )
gdal.PopErrorHandler()
# if proc is alive after timeout we must terminate it, and return fail
# valgrind detects memory leaks when this occurs (although it should never happen)
if proc.is_alive():
proc.terminate()
print('testOpen() for file %s has reached timeout limit of %d seconds' % (filename, timeout) )
return 'fail'
return 'success'
###############################################################################
# test writing a metadata item longer than 8196 chars (bug #5113)
def netcdf_35():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/netcdf_fixes.nc'
ofile = 'tmp/netcdf_35.nc'
# copy file
result = netcdf_test_copy( ifile, 0, None, ofile )
if result != 'success':
return 'fail'
# test long metadata is copied correctly
ds = gdal.Open( ofile )
if ds is None:
gdaltest.post_reason( 'open of copy failed' )
return 'fail'
md = ds.GetMetadata( '' )
    if 'U#bla' not in md:
gdaltest.post_reason( 'U#bla metadata absent' )
return 'fail'
bla = md['U#bla']
    if len(bla) != 9591:
gdaltest.post_reason( 'U#bla metadata is of length %d, expecting %d' % (len(bla),9591) )
return 'fail'
    if bla[-4:] != '_bla':
gdaltest.post_reason( 'U#bla metadata ends with [%s], expecting [%s]' % (bla[-4:], '_bla') )
return 'fail'
return 'success'
###############################################################################
# test for correct geotransform (bug #5114)
def netcdf_36():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/netcdf_fixes.nc'
ds = gdal.Open( ifile )
if ds is None:
gdaltest.post_reason( 'open failed' )
return 'fail'
gt = ds.GetGeoTransform( )
if gt is None:
gdaltest.post_reason( 'got no GeoTransform' )
return 'fail'
gt_expected = (-3.498749944898817, 0.0025000042385525173, 0.0, 46.61749818589952, 0.0, -0.001666598849826389)
if gt != gt_expected:
gdaltest.post_reason( 'got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)) )
return 'fail'
return 'success'
###############################################################################
# test for reading gaussian grid (bugs #4513 and #5118)
def netcdf_37():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/reduce-cgcms.nc'
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open( ifile )
gdal.PopErrorHandler()
if ds is None:
gdaltest.post_reason( 'open failed' )
return 'fail'
gt = ds.GetGeoTransform( )
if gt is None:
gdaltest.post_reason( 'got no GeoTransform' )
return 'fail'
gt_expected = (-1.875, 3.75, 0.0, 89.01354337620016, 0.0, -3.7088976406750063)
if gt != gt_expected:
gdaltest.post_reason( 'got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)) )
return 'fail'
md = ds.GetMetadata( 'GEOLOCATION2' )
    if not md or 'Y_VALUES' not in md:
gdaltest.post_reason( 'did not get 1D geolocation' )
return 'fail'
y_vals = md['Y_VALUES']
if not y_vals.startswith('{-87.15909455586265,-83.47893666931698,') \
or not y_vals.endswith(',83.47893666931698,87.15909455586265}'):
gdaltest.post_reason( 'got incorrect values in 1D geolocation' )
return 'fail'
return 'success'
###############################################################################
# test for correct geotransform of projected data in km units (bug #5118)
def netcdf_38():
if gdaltest.netcdf_drv is None:
return 'skip'
ifile = 'data/bug5118.nc'
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open( ifile )
gdal.PopErrorHandler()
if ds is None:
gdaltest.post_reason( 'open failed' )
return 'fail'
gt = ds.GetGeoTransform( )
if gt is None:
gdaltest.post_reason( 'got no GeoTransform' )
return 'fail'
gt_expected = (-1659.3478178136488, 13.545000861672793, 0.0, 2330.054725283668, 0.0, -13.54499744233631)
if gt != gt_expected:
gdaltest.post_reason( 'got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)) )
return 'fail'
return 'success'
###############################################################################
# Test VRT and NETCDF:
def netcdf_39():
if gdaltest.netcdf_drv is None:
return 'skip'
shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
src_ds = gdal.Open('NETCDF:tmp/two_vars_scale_offset.nc:z')
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds)
out_ds = None
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/two_vars_scale_offset.nc')
gdal.Unlink('tmp/netcdf_39.vrt')
if cs != 65463:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
src_ds = gdal.Open('NETCDF:"tmp/two_vars_scale_offset.nc":z')
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds)
out_ds = None
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/two_vars_scale_offset.nc')
gdal.Unlink('tmp/netcdf_39.vrt')
if cs != 65463:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
src_ds = gdal.Open('NETCDF:"%s/tmp/two_vars_scale_offset.nc":z' % os.getcwd())
out_ds = gdal.GetDriverByName('VRT').CreateCopy('%s/tmp/netcdf_39.vrt' % os.getcwd(), src_ds)
out_ds = None
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/two_vars_scale_offset.nc')
gdal.Unlink('tmp/netcdf_39.vrt')
if cs != 65463:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
src_ds = gdal.Open('NETCDF:"%s/data/two_vars_scale_offset.nc":z' % os.getcwd())
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds)
del out_ds
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/netcdf_39.vrt')
if cs != 65463:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Check support of reading of chunked bottom-up files.
def netcdf_40():
if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_nc4:
return 'skip'
return netcdf_test_copy( 'data/bug5291.nc', 0, None, 'tmp/netcdf_40.nc' )
###############################################################################
# Test support for georeferenced file without CF convention
def netcdf_41():
if gdaltest.netcdf_drv is None:
return 'skip'
with gdaltest.error_handler():
ds = gdal.Open('data/byte_no_cf.nc')
if ds.GetGeoTransform() != (440720, 60, 0, 3751320, 0, -60):
gdaltest.post_reason('failure')
print(ds.GetGeoTransform())
return 'fail'
if ds.GetProjectionRef().find('26711') < 0:
gdaltest.post_reason('failure')
print(ds.GetGeoTransform())
return 'fail'
return 'success'
###############################################################################
# Test writing & reading GEOLOCATION array
def netcdf_42():
if gdaltest.netcdf_drv is None:
return 'skip'
src_ds = gdal.GetDriverByName('MEM').Create('', 60, 39, 1)
src_ds.SetMetadata( [
'LINE_OFFSET=0',
'LINE_STEP=1',
'PIXEL_OFFSET=0',
'PIXEL_STEP=1',
'SRS=GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4326"]]',
'X_BAND=1',
'X_DATASET=../gcore/data/sstgeo.tif',
'Y_BAND=2',
'Y_DATASET=../gcore/data/sstgeo.tif'], 'GEOLOCATION' )
sr = osr.SpatialReference()
sr.ImportFromEPSG(32631)
src_ds.SetProjection(sr.ExportToWkt())
gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_42.nc', src_ds)
ds = gdal.Open('tmp/netcdf_42.nc')
if ds.GetMetadata('GEOLOCATION') != {
'LINE_OFFSET': '0',
'X_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lon',
'PIXEL_STEP': '1',
'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'PIXEL_OFFSET': '0',
'X_BAND': '1',
'LINE_STEP': '1',
'Y_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lat',
'Y_BAND': '1'}:
gdaltest.post_reason('failure')
print(ds.GetMetadata('GEOLOCATION'))
return 'fail'
ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lon')
if ds.GetRasterBand(1).Checksum() != 36043:
gdaltest.post_reason('failure')
print(ds.GetRasterBand(1).Checksum())
return 'fail'
ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lat')
if ds.GetRasterBand(1).Checksum() != 33501:
gdaltest.post_reason('failure')
print(ds.GetRasterBand(1).Checksum())
return 'fail'
return 'success'
###############################################################################
# Test reading GEOLOCATION array from geotransform (non default)
def netcdf_43():
if gdaltest.netcdf_drv is None:
return 'skip'
src_ds = gdal.Open('data/byte.tif')
gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_43.nc', src_ds, options = ['WRITE_LONLAT=YES'] )
ds = gdal.Open('tmp/netcdf_43.nc')
if ds.GetMetadata('GEOLOCATION') != {
'LINE_OFFSET': '0',
'X_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lon',
'PIXEL_STEP': '1',
'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'PIXEL_OFFSET': '0',
'X_BAND': '1',
'LINE_STEP': '1',
'Y_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lat',
'Y_BAND': '1'}:
gdaltest.post_reason('failure')
print(ds.GetMetadata('GEOLOCATION'))
return 'fail'
return 'success'
###############################################################################
# Test NC_USHORT/UINT read/write - netcdf-4 only (#6337)
def netcdf_44():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
    for f, checksum in ('data/ushort.nc', 18), ('data/uint.nc', 10):
        if (netcdf_test_copy( f, 1, checksum, 'tmp/netcdf_44.nc', [ 'FORMAT=NC4' ] )
                != 'success'):
return 'fail'
return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file
def netcdf_45():
if gdaltest.netcdf_drv is None:
return 'skip'
# Test that a vector cannot be opened in raster-only mode
ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_RASTER )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# Test that a raster cannot be opened in vector-only mode
ds = gdal.OpenEx( 'data/cf-bug636.nc', gdal.OF_VECTOR )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
with gdaltest.error_handler():
gdal.VectorTranslate( '/vsimem/netcdf_45.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_45.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
fp = gdal.VSIFOpenL( '/vsimem/netcdf_45.csvt', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_45.csv')
gdal.Unlink('/vsimem/netcdf_45.csvt')
return 'success'
###############################################################################
# Run test_ogrsf on a vector NetCDF 3 file
def netcdf_46():
if gdaltest.netcdf_drv is None:
return 'skip'
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/test_ogr_nc3.nc')
if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test reading a vector NetCDF 4 file
def netcdf_47():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
# Test that a vector cannot be opened in raster-only mode
with gdaltest.error_handler():
ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_RASTER )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_VECTOR )
with gdaltest.error_handler():
gdal.VectorTranslate( '/vsimem/netcdf_47.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_47.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
fp = gdal.VSIFOpenL( '/vsimem/netcdf_47.csvt', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_47.csv')
gdal.Unlink('/vsimem/netcdf_47.csvt')
return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file without any geometry
def netcdf_48():
if gdaltest.netcdf_drv is None:
return 'skip'
with gdaltest.error_handler():
ds = gdal.OpenEx( 'data/test_ogr_no_xyz_var.nc', gdal.OF_VECTOR )
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbNone:
gdaltest.post_reason('failure')
return 'fail'
f = lyr.GetNextFeature()
if f['int32'] != 1:
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file with X,Y,Z vars as float
def netcdf_49():
if gdaltest.netcdf_drv is None:
return 'skip'
with gdaltest.error_handler():
ds = gdal.OpenEx( 'data/test_ogr_xyz_float.nc', gdal.OF_VECTOR )
gdal.VectorTranslate( '/vsimem/netcdf_49.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_49.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32
"POINT Z (1 2 3)",1
"POINT (1 2)",
,,
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_49.csv')
return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with WKT geometry field
def netcdf_50():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.OpenEx( '../ogr/data/poly.shp', gdal.OF_VECTOR )
out_ds = gdal.VectorTranslate( 'tmp/netcdf_50.nc', ds, format = 'netCDF', layerCreationOptions = [ 'WKT_DEFAULT_WIDTH=1'] )
src_lyr = ds.GetLayer(0)
src_lyr.ResetReading()
out_lyr = out_ds.GetLayer(0)
out_lyr.ResetReading()
src_f = src_lyr.GetNextFeature()
out_f = out_lyr.GetNextFeature()
src_f.SetFID(-1)
out_f.SetFID(-1)
src_json = src_f.ExportToJson()
out_json = out_f.ExportToJson()
if src_json != out_json:
gdaltest.post_reason('failure')
print(src_json)
print(out_json)
return 'fail'
out_ds = None
out_ds = gdal.OpenEx( 'tmp/netcdf_50.nc', gdal.OF_VECTOR )
out_lyr = out_ds.GetLayer(0)
srs = out_lyr.GetSpatialRef().ExportToWkt()
if srs.find('PROJCS["OSGB 1936') < 0:
gdaltest.post_reason('failure')
print(srs)
return 'fail'
out_f = out_lyr.GetNextFeature()
out_f.SetFID(-1)
out_json = out_f.ExportToJson()
if src_json != out_json:
gdaltest.post_reason('failure')
print(src_json)
print(out_json)
return 'fail'
out_ds = None
gdal.Unlink('tmp/netcdf_50.nc')
return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields
def netcdf_51():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
# Test autogrow of string fields
gdal.VectorTranslate( 'tmp/netcdf_51.nc', ds, format = 'netCDF', layerCreationOptions = [ 'STRING_DEFAULT_WIDTH=1'] )
with gdaltest.error_handler():
ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR )
gdal.VectorTranslate( '/vsimem/netcdf_51.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
ds = None
fp = gdal.VSIFOpenL( '/vsimem/netcdf_51.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
fp = gdal.VSIFOpenL( '/vsimem/netcdf_51.csvt', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
lyr = ds.GetLayer(0)
lyr.CreateField( ogr.FieldDefn('extra', ogr.OFTInteger) )
lyr.CreateField( ogr.FieldDefn('extra_str', ogr.OFTString) )
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('failure')
return 'fail'
f['extra'] = 5
f['extra_str'] = 'foobar'
if lyr.CreateFeature(f) != 0:
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR )
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
if f['int32'] != 1 or f['extra'] != 5 or f['extra_str'] != 'foobar':
gdaltest.post_reason('failure')
return 'fail'
f = None
ds = None
import netcdf_cf
if netcdf_cf.netcdf_cf_setup() == 'success' and \
gdaltest.netcdf_cf_method is not None:
result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_51.nc','auto',False )
if result_cf != 'success':
gdaltest.post_reason('failure')
return 'fail'
gdal.Unlink('tmp/netcdf_51.nc')
gdal.Unlink('tmp/netcdf_51.csv')
gdal.Unlink('tmp/netcdf_51.csvt')
return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields with WRITE_GDAL_TAGS=NO
def netcdf_51_no_gdal_tags():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
gdal.VectorTranslate( 'tmp/netcdf_51_no_gdal_tags.nc', ds, format = 'netCDF', datasetCreationOptions = [ 'WRITE_GDAL_TAGS=NO'] )
with gdaltest.error_handler():
ds = gdal.OpenEx( 'tmp/netcdf_51_no_gdal_tags.nc', gdal.OF_VECTOR )
gdal.VectorTranslate( '/vsimem/netcdf_51_no_gdal_tags.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
ds = None
fp = gdal.VSIFOpenL( '/vsimem/netcdf_51_no_gdal_tags.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x1,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
fp = gdal.VSIFOpenL( '/vsimem/netcdf_51_no_gdal_tags.csvt', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String(10),Date,DateTime,DateTime,Real,Real,Integer,Integer,Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('tmp/netcdf_51_no_gdal_tags.nc')
gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csv')
gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csvt')
return 'success'
###############################################################################
# Test creating a vector NetCDF 4 file with X,Y,Z fields
def netcdf_52():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_VECTOR )
gdal.VectorTranslate( 'tmp/netcdf_52.nc', ds, format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'] )
with gdaltest.error_handler():
ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR )
gdal.VectorTranslate( '/vsimem/netcdf_52.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
ds = None
fp = gdal.VSIFOpenL( '/vsimem/netcdf_52.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
fp = gdal.VSIFOpenL( '/vsimem/netcdf_52.csvt', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
lyr = ds.GetLayer(0)
lyr.CreateField( ogr.FieldDefn('extra', ogr.OFTInteger) )
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('failure')
return 'fail'
f['extra'] = 5
if lyr.CreateFeature(f) != 0:
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR )
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
if f['int32'] != 1 or f['extra'] != 5:
gdaltest.post_reason('failure')
return 'fail'
f = None
ds = None
import netcdf_cf
if netcdf_cf.netcdf_cf_setup() == 'success' and \
gdaltest.netcdf_cf_method is not None:
result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_52.nc','auto',False )
if result_cf != 'success':
gdaltest.post_reason('failure')
return 'fail'
gdal.Unlink('tmp/netcdf_52.nc')
gdal.Unlink('tmp/netcdf_52.csv')
gdal.Unlink('tmp/netcdf_52.csvt')
return 'success'
###############################################################################
# Test creating a vector NetCDF 4 file with WKT geometry field
def netcdf_53():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
ds = gdal.OpenEx( '../ogr/data/poly.shp', gdal.OF_VECTOR )
out_ds = gdal.VectorTranslate( 'tmp/netcdf_53.nc', ds, format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'] )
src_lyr = ds.GetLayer(0)
src_lyr.ResetReading()
out_lyr = out_ds.GetLayer(0)
out_lyr.ResetReading()
src_f = src_lyr.GetNextFeature()
out_f = out_lyr.GetNextFeature()
src_f.SetFID(-1)
out_f.SetFID(-1)
src_json = src_f.ExportToJson()
out_json = out_f.ExportToJson()
if src_json != out_json:
gdaltest.post_reason('failure')
print(src_json)
print(out_json)
return 'fail'
out_ds = None
out_ds = gdal.OpenEx( 'tmp/netcdf_53.nc', gdal.OF_VECTOR )
out_lyr = out_ds.GetLayer(0)
srs = out_lyr.GetSpatialRef().ExportToWkt()
if srs.find('PROJCS["OSGB 1936') < 0:
gdaltest.post_reason('failure')
print(srs)
return 'fail'
out_f = out_lyr.GetNextFeature()
out_f.SetFID(-1)
out_json = out_f.ExportToJson()
if src_json != out_json:
gdaltest.post_reason('failure')
print(src_json)
print(out_json)
return 'fail'
out_ds = None
gdal.Unlink('tmp/netcdf_53.nc')
return 'success'
###############################################################################
# Test appending to a vector NetCDF 4 file with unusual types (ubyte, ushort...)
def netcdf_54():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
shutil.copy( 'data/test_ogr_nc4.nc', 'tmp/netcdf_54.nc')
ds = gdal.OpenEx( 'tmp/netcdf_54.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('failure')
return 'fail'
f['int32'] += 1
f.SetFID(-1)
f.ExportToJson()
src_json = f.ExportToJson()
if lyr.CreateFeature(f) != 0:
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.OpenEx( 'tmp/netcdf_54.nc', gdal.OF_VECTOR )
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
f.SetFID(-1)
out_json = f.ExportToJson()
f = None
ds = None
gdal.Unlink('tmp/netcdf_54.nc')
if src_json != out_json:
gdaltest.post_reason('failure')
print(src_json)
print(out_json)
return 'fail'
return 'success'
###############################################################################
# Test auto-grow of bidimensional char variables in a vector NetCDF 4 file
def netcdf_55():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
shutil.copy( 'data/test_ogr_nc4.nc', 'tmp/netcdf_55.nc')
ds = gdal.OpenEx( 'tmp/netcdf_55.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('failure')
return 'fail'
f['twodimstringchar'] = 'abcd'
f.SetFID(-1)
f.ExportToJson()
src_json = f.ExportToJson()
if lyr.CreateFeature(f) != 0:
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.OpenEx( 'tmp/netcdf_55.nc', gdal.OF_VECTOR )
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
f.SetFID(-1)
out_json = f.ExportToJson()
f = None
ds = None
gdal.Unlink('tmp/netcdf_55.nc')
if src_json != out_json:
gdaltest.post_reason('failure')
print(src_json)
print(out_json)
return 'fail'
return 'success'
###############################################################################
# Test truncation of bidimensional char variables and WKT in a vector NetCDF 3 file
def netcdf_56():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_56.nc')
# Test auto-grow of WKT field
lyr = ds.CreateLayer('netcdf_56', options = [ 'AUTOGROW_STRINGS=NO', 'STRING_DEFAULT_WIDTH=5', 'WKT_DEFAULT_WIDTH=5' ] )
lyr.CreateField(ogr.FieldDefn('txt'))
f = ogr.Feature(lyr.GetLayerDefn())
f['txt'] = '0123456789'
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
with gdaltest.error_handler():
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.OpenEx( 'tmp/netcdf_56.nc', gdal.OF_VECTOR )
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
if f['txt'] != '01234' or f.GetGeometryRef() is not None:
gdaltest.post_reason('failure')
f.DumpReadable()
return 'fail'
ds = None
gdal.Unlink('tmp/netcdf_56.nc')
return 'success'
###############################################################################
# Test one layer per file creation
def netcdf_57():
if gdaltest.netcdf_drv is None:
return 'skip'
try:
shutil.rmtree('tmp/netcdf_57')
except:
pass
with gdaltest.error_handler():
ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
if ds is not None:
gdaltest.post_reason('failure')
return 'fail'
open('tmp/netcdf_57', 'wb').close()
with gdaltest.error_handler():
ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
if ds is not None:
gdaltest.post_reason('failure')
return 'fail'
os.unlink('tmp/netcdf_57')
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_57', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
for ilayer in range(2):
lyr = ds.CreateLayer('lyr%d' % ilayer)
lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f['lyr_id'] = ilayer
lyr.CreateFeature(f)
ds = None
for ilayer in range(2):
ds = ogr.Open('tmp/netcdf_57/lyr%d.nc' % ilayer)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f['lyr_id'] != ilayer:
gdaltest.post_reason('failure')
return 'fail'
ds = None
shutil.rmtree('tmp/netcdf_57')
return 'success'
###############################################################################
# Test one layer per group (NC4)
def netcdf_58():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_58.nc', options = ['FORMAT=NC4', 'MULTIPLE_LAYERS=SEPARATE_GROUPS'])
for ilayer in range(2):
        # Force auto-grow (tiny default string width) to check that it works well with multiple groups
lyr = ds.CreateLayer('lyr%d' % ilayer, geom_type = ogr.wkbNone, options = ['USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1' ])
lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['lyr_id'] = 'lyr_%d' % ilayer
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('tmp/netcdf_58.nc')
for ilayer in range(2):
lyr = ds.GetLayer(ilayer)
f = lyr.GetNextFeature()
if f['lyr_id'] != 'lyr_%d' % ilayer:
gdaltest.post_reason('failure')
return 'fail'
ds = None
gdal.Unlink('tmp/netcdf_58.nc')
return 'success'
###############################################################################
# check UnitType set/get
def netcdf_59():
if gdaltest.netcdf_drv is None:
return 'skip'
# get
ds = gdal.Open( 'data/unittype.nc' )
    unit = ds.GetRasterBand( 1 ).GetUnitType()
if unit != 'm/s':
gdaltest.post_reason( 'Incorrect unit(%s)' % unit )
return 'fail'
ds = None
# set
tst = gdaltest.GDALTest( 'NetCDF', 'unittype.nc', 1, 4672 )
return tst.testSetUnitType()
###############################################################################
# Test reading a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_indexed_ragged_array_representation_of_profiles
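# As a sketch of what such a file's header looks like (reconstructed from the
# attributes verified in netcdf_62_ncdump_check below; widths are illustrative):
#   dimensions:
#       profile = 2 ;
#       record = UNLIMITED ;
#   variables:
#       int profile(profile) ;
#           profile:cf_role = "profile_id" ;
#       int parentIndex(record) ;
#           parentIndex:instance_dimension = "profile" ;
#       char station(profile, ...) ;
#       char foo(record, ...) ;
#   // global attributes:
#       :featureType = "profile" ;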
def netcdf_60():
if gdaltest.netcdf_drv is None:
return 'skip'
# Test that a vector cannot be opened in raster-only mode
ds = gdal.OpenEx( 'data/profile.nc', gdal.OF_RASTER )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx( 'data/profile.nc', gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
with gdaltest.error_handler():
gdal.VectorTranslate( '/vsimem/netcdf_60.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_60.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_60.csv')
return 'success'
###############################################################################
# Test appending to a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_61():
if gdaltest.netcdf_drv is None:
return 'skip'
shutil.copy('data/profile.nc', 'tmp/netcdf_61.nc')
ds = gdal.VectorTranslate( 'tmp/netcdf_61.nc', 'data/profile.nc', accessMode = 'append' )
gdal.VectorTranslate( '/vsimem/netcdf_61.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_61.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_61.csv')
    gdal.Unlink('tmp/netcdf_61.nc')
return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_62():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.VectorTranslate( 'tmp/netcdf_62.nc', 'data/profile.nc', format = 'netCDF', layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_INIT_SIZE=1', 'PROFILE_VARIABLES=station'] )
gdal.VectorTranslate( '/vsimem/netcdf_62.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_62.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_62.csv')
return 'success'
def netcdf_62_ncdump_check():
if gdaltest.netcdf_drv is None:
return 'skip'
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except:
err = None
if err is not None and 'netcdf library version' in err:
(ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_62.nc' )
if ret.find('profile = 2') < 0 or \
ret.find('record = UNLIMITED') < 0 or \
ret.find('profile:cf_role = "profile_id"') < 0 or \
ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
ret.find(':featureType = "profile"') < 0 or \
ret.find('char station(profile') < 0 or \
ret.find('char foo(record') < 0:
gdaltest.post_reason('failure')
print(ret)
return 'fail'
else:
return 'skip'
return 'success'
def netcdf_62_cf_check():
if gdaltest.netcdf_drv is None:
return 'skip'
import netcdf_cf
if netcdf_cf.netcdf_cf_setup() == 'success' and \
gdaltest.netcdf_cf_method is not None:
result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_62.nc','auto',False )
if result_cf != 'success':
gdaltest.post_reason('failure')
return 'fail'
    gdal.Unlink('tmp/netcdf_62.nc')
return 'success'
###############################################################################
# Test creating an NC4 "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_63():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
shutil.copy('data/profile.nc', 'tmp/netcdf_63.nc')
ds = gdal.VectorTranslate( 'tmp/netcdf_63.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'], layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1' ] )
gdal.VectorTranslate( '/vsimem/netcdf_63.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_63.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_63.csv')
return 'success'
def netcdf_63_ncdump_check():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except:
err = None
if err is not None and 'netcdf library version' in err:
(ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_63.nc' )
if ret.find('profile = UNLIMITED') < 0 or \
ret.find('record = UNLIMITED') < 0 or \
ret.find('profile:cf_role = "profile_id"') < 0 or \
ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
ret.find(':featureType = "profile"') < 0 or \
ret.find('char station(record') < 0:
gdaltest.post_reason('failure')
print(ret)
return 'fail'
else:
        gdal.Unlink('tmp/netcdf_63.nc')
return 'skip'
    gdal.Unlink('tmp/netcdf_63.nc')
return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# but without a profile field.
def netcdf_64():
if gdaltest.netcdf_drv is None:
return 'skip'
gdal.VectorTranslate( 'tmp/netcdf_64.nc', 'data/profile.nc', format = 'netCDF', selectFields = ['id,station,foo'], layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_NAME=profile_dim', 'PROFILE_DIM_INIT_SIZE=1'] )
gdal.VectorTranslate( '/vsimem/netcdf_64.csv', 'tmp/netcdf_64.nc', format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_64.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile_dim,id,station,foo
"POINT Z (2 49 100)",0,1,Palo Alto,bar
"POINT Z (3 50 50)",1,2,Santa Fe,baz
"POINT Z (2 49 200)",0,3,Palo Alto,baw
"POINT Z (3 50 100)",1,4,Santa Fe,baz2
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_64.csv')
    gdal.Unlink('tmp/netcdf_64.nc')
return 'success'
###############################################################################
# Test creating a NC4 file with empty string fields / WKT fields
# (they must be filled as empty strings to avoid crashes in netcdf lib)
def netcdf_65():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_65.nc', options = ['FORMAT=NC4'])
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('tmp/netcdf_65.nc')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f['str'] != '':
gdaltest.post_reason('failure')
f.DumpReadable()
return 'fail'
ds = None
gdal.Unlink('tmp/netcdf_65.nc')
return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# from a config file
def netcdf_66():
if gdaltest.netcdf_drv is None:
return 'skip'
    # First try with some invalid or incomplete configs
with gdaltest.error_handler():
gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=not_existing'] )
with gdaltest.error_handler():
gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=<Configuration>'] )
myconfig = \
"""<Configuration>
<!-- comment -->
<unrecognized_elt/>
<DatasetCreationOption/>
<DatasetCreationOption name="x"/>
<DatasetCreationOption value="x"/>
<LayerCreationOption/>
<LayerCreationOption name="x"/>
<LayerCreationOption value="x"/>
<Attribute/>
<Attribute name="foo"/>
<Attribute value="foo"/>
<Attribute name="foo" value="bar" type="unsupported"/>
<Field/>
<Field name="x">
<!-- comment -->
<unrecognized_elt/>
</Field>
<Field name="station" main_dim="non_existing"/>
<Layer/>
<Layer name="x">
<!-- comment -->
<unrecognized_elt/>
<LayerCreationOption/>
<LayerCreationOption name="x"/>
<LayerCreationOption value="x"/>
<Attribute/>
<Attribute name="foo"/>
<Attribute value="foo"/>
<Attribute name="foo" value="bar" type="unsupported"/>
<Field/>
</Layer>
</Configuration>
"""
with gdaltest.error_handler():
gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=' + myconfig] )
# Now with a correct configuration
myconfig = \
"""<Configuration>
<DatasetCreationOption name="WRITE_GDAL_TAGS" value="NO"/>
<LayerCreationOption name="STRING_DEFAULT_WIDTH" value="1"/>
<Attribute name="foo" value="bar"/>
<Attribute name="foo2" value="bar2"/>
<Field name="id">
<Attribute name="my_extra_attribute" value="5.23" type="double"/>
</Field>
<Field netcdf_name="lon"> <!-- edit predefined variable -->
<Attribute name="my_extra_lon_attribute" value="foo"/>
</Field>
<Layer name="profile" netcdf_name="my_profile">
<LayerCreationOption name="FEATURE_TYPE" value="PROFILE"/>
<LayerCreationOption name="RECORD_DIM_NAME" value="obs"/>
<Attribute name="foo" value="123" type="integer"/> <!-- override global one -->
<Field name="station" netcdf_name="my_station" main_dim="obs">
<Attribute name="long_name" value="my station attribute"/>
</Field>
<Field netcdf_name="lat"> <!-- edit predefined variable -->
<Attribute name="long_name" value=""/> <!-- remove predefined attribute -->
</Field>
</Layer>
</Configuration>
"""
gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=' + myconfig] )
gdal.VectorTranslate( '/vsimem/netcdf_66.csv', 'tmp/netcdf_66.nc', format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
fp = gdal.VSIFOpenL( '/vsimem/netcdf_66.csv', 'rb' )
if fp is not None:
content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,my_station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
if content != expected_content:
gdaltest.post_reason('failure')
print(content)
return 'fail'
gdal.Unlink('/vsimem/netcdf_66.csv')
return 'success'
def netcdf_66_ncdump_check():
if gdaltest.netcdf_drv is None:
return 'skip'
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except:
err = None
if err is not None and 'netcdf library version' in err:
(ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_66.nc' )
if ret.find('char my_station(obs, my_station_max_width)') < 0 or \
ret.find('my_station:long_name = "my station attribute"') < 0 or \
ret.find('lon:my_extra_lon_attribute = "foo"') < 0 or \
ret.find('lat:long_name') >= 0 or \
ret.find('id:my_extra_attribute = 5.23') < 0 or \
ret.find('profile:cf_role = "profile_id"') < 0 or \
ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
ret.find(':featureType = "profile"') < 0:
gdaltest.post_reason('failure')
print(ret)
return 'fail'
else:
        gdal.Unlink('tmp/netcdf_66.nc')
return 'skip'
    gdal.Unlink('tmp/netcdf_66.nc')
return 'success'
###############################################################################
# ticket #5950: optimize IReadBlock() and CheckData() handling of partial
# blocks in the x axis. Check for partial block reading.
def netcdf_67():
if gdaltest.netcdf_drv is None:
return 'skip'
if not gdaltest.netcdf_drv_has_nc4:
return 'skip'
try:
import numpy
except:
return 'skip'
# disable bottom-up mode to use the real file's blocks size
gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', 'NO' )
    # for the moment the next test using check_stat does not work; it seems
    # the last pixel (9) of the image is not handled by the stats...
# tst = gdaltest.GDALTest( 'NetCDF', 'partial_block_ticket5950.nc', 1, 45 )
# result = tst.testOpen( check_stat=(1, 9, 5, 2.582) )
# so for the moment compare the full image
ds = gdal.Open( 'data/partial_block_ticket5950.nc', gdal.GA_ReadOnly )
ref = numpy.arange(1, 10).reshape((3, 3))
if numpy.array_equal(ds.GetRasterBand(1).ReadAsArray(), ref):
result = 'success'
else:
result = 'fail'
ds = None
gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', None )
return result
###############################################################################
# Test reading SRS from srid attribute (#6613)
def netcdf_68():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open('data/srid.nc')
wkt = ds.GetProjectionRef()
if wkt.find('6933') < 0:
gdaltest.post_reason('failure')
print(wkt)
return 'fail'
return 'success'
###############################################################################
# Test opening a dataset with a 1D variable with 0 record (#6645)
def netcdf_69():
if gdaltest.netcdf_drv is None:
return 'skip'
ds = gdal.Open('data/test6645.nc')
if ds is None:
return 'fail'
return 'success'
###############################################################################
###############################################################################
# main tests list
gdaltest_list = [
netcdf_1,
netcdf_2,
netcdf_3,
netcdf_4,
netcdf_5,
netcdf_6,
netcdf_7,
netcdf_8,
netcdf_9,
netcdf_10,
netcdf_11,
netcdf_12,
netcdf_13,
netcdf_14,
netcdf_15,
netcdf_16,
netcdf_17,
netcdf_18,
netcdf_19,
netcdf_20,
netcdf_21,
netcdf_22,
netcdf_23,
netcdf_24,
netcdf_25,
netcdf_26,
netcdf_27,
netcdf_28,
netcdf_29,
netcdf_30,
netcdf_31,
netcdf_32,
netcdf_33,
netcdf_34,
netcdf_35,
netcdf_36,
netcdf_37,
netcdf_38,
netcdf_39,
netcdf_40,
netcdf_41,
netcdf_42,
netcdf_43,
netcdf_44,
netcdf_45,
netcdf_46,
netcdf_47,
netcdf_48,
netcdf_49,
netcdf_50,
netcdf_51,
netcdf_51_no_gdal_tags,
netcdf_52,
netcdf_53,
netcdf_54,
netcdf_55,
netcdf_56,
netcdf_57,
netcdf_58,
netcdf_59,
netcdf_60,
netcdf_61,
netcdf_62,
netcdf_62_ncdump_check,
netcdf_62_cf_check,
netcdf_63,
netcdf_63_ncdump_check,
netcdf_64,
netcdf_65,
netcdf_66,
netcdf_66_ncdump_check,
netcdf_67,
netcdf_68,
netcdf_69
]
###############################################################################
# basic file creation tests
init_list = [
('byte.tif', 1, 4672, None, []),
('byte_signed.tif', 1, 4672, None, ['PIXELTYPE=SIGNEDBYTE']),
('int16.tif', 1, 4672, None, []),
('int32.tif', 1, 4672, None, []),
('float32.tif', 1, 4672, None, []),
('float64.tif', 1, 4672, None, [])
]
# Some tests we don't need to do for each type.
item = init_list[0]
ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
#test geotransform and projection
gdaltest_list.append( (ut.testSetGeoTransform, item[0]) )
gdaltest_list.append( (ut.testSetProjection, item[0]) )
#SetMetadata() not supported
#gdaltest_list.append( (ut.testSetMetadata, item[0]) )
# Others we do for each pixel type.
for item in init_list:
ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
    if ut is None:
        print( 'netcdf tests skipped' )
gdaltest_list.append( (ut.testCreateCopy, item[0]) )
gdaltest_list.append( (ut.testCreate, item[0]) )
gdaltest_list.append( (ut.testSetNoDataValue, item[0]) )
###############################################################################
# other tests
if __name__ == '__main__':
gdaltest.setup_run( 'netcdf' )
gdaltest.run_tests( gdaltest_list )
#make sure we cleanup
gdaltest.clean_tmp()
gdaltest.summarize()
|
kb_ReadsUtilitiesServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_ReadsUtilities.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
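# DEPLOY and SERVICE name environment variables, e.g. (illustrative values):
#   KB_DEPLOYMENT_CONFIG=/kb/module/deploy.cfg  KB_SERVICE_NAME=kb_ReadsUtilities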
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
    for key, val in config.items(get_service_name() or 'kb_ReadsUtilities'):
        retconfig[key] = val
return retconfig
config = get_config()
from kb_ReadsUtilities.kb_ReadsUtilitiesImpl import kb_ReadsUtilities # noqa @IgnorePep8
impl_kb_ReadsUtilities = kb_ReadsUtilities(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
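# Minimal usage sketch: json.dumps({'ids': {1, 2}}, cls=JSONObjectEncoder)
# serializes the set as a JSON list (element order is not guaranteed).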
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
                if (not self._vargs(method) and
                        len(params) > self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
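                # (jsonrpcbase normalizes the version to an int here,
                # presumably 11 for '1.1' and 20 for '2.0')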
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
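# Hedged example (the helper name and addresses are hypothetical): with a
# proxy chain filling X-Forwarded-For, the first hop is taken as the client
# address unless the config distrusts the X-* headers.
def _demo_get_ip_address():
    environ = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.2',
               'REMOTE_ADDR': '10.0.0.1'}
    return getIPAddress(environ)  # '203.0.113.7' when X headers are trusted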
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_ReadsUtilities'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_FASTQ_to_FASTA,
name='kb_ReadsUtilities.KButil_FASTQ_to_FASTA',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_FASTQ_to_FASTA'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_Split_Reads,
name='kb_ReadsUtilities.KButil_Split_Reads',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_Split_Reads'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_Random_Subsample_Reads,
name='kb_ReadsUtilities.KButil_Random_Subsample_Reads',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_Random_Subsample_Reads'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_Merge_ReadsSet_to_OneLibrary,
name='kb_ReadsUtilities.KButil_Merge_ReadsSet_to_OneLibrary',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_Merge_ReadsSet_to_OneLibrary'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_Merge_MultipleReadsLibs_to_OneLibrary,
name='kb_ReadsUtilities.KButil_Merge_MultipleReadsLibs_to_OneLibrary',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_Merge_MultipleReadsLibs_to_OneLibrary'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs,
name='kb_ReadsUtilities.KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_Translate_ReadsLibs_QualScores,
name='kb_ReadsUtilities.KButil_Translate_ReadsLibs_QualScores',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_Translate_ReadsLibs_QualScores'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.KButil_AddInsertLen_to_ReadsLibs,
name='kb_ReadsUtilities.KButil_AddInsertLen_to_ReadsLibs',
types=[dict])
self.method_authentication['kb_ReadsUtilities.KButil_AddInsertLen_to_ReadsLibs'] = 'required' # noqa
self.rpc_service.add(impl_kb_ReadsUtilities.status,
name='kb_ReadsUtilities.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_ReadsUtilities ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server, which
# listens on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, starts the server on localhost on a system-assigned port
in the main thread. Execution of the main thread stays in the server
main loop until interrupted. To run the server in a separate process,
and thus allow the stop_server method to be called, set
newprocess=True; this also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
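# Hedged usage sketch (helper name hypothetical; assumes a platform where
# the listening socket survives the fork into the child process): start the
# service on an OS-assigned port, then tear it down.
def _demo_start_stop_server():
    port = start_server(port=0, newprocess=True)  # returns the bound port
    try:
        return port  # e.g. poke http://localhost:<port> from a client here
    finally:
        stop_server()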
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
power_monitoring.py | import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
PANDA_OUTPUT_VOLTAGE = 5.28
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0
MAX_TIME_OFFROAD_S = 30*3600
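# Where CAR_VOLTAGE_LOW_PASS_K comes from (a sketch, assuming the monitor
# ticks at ~2 Hz so dt = 0.5 s): for a first-order low-pass with tau = 5 s,
# K = (dt/tau) / (dt/tau + 1) = 0.1 / 1.1 ~= 0.091, matching the constant.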
# Parameters
def get_battery_capacity():
return _read_param("/sys/class/power_supply/battery/capacity", int)
# Helpers
def _read_param(path, parser, default=0):
try:
with open(path) as f:
return parser(f.read())
except Exception:
return default
def panda_current_to_actual_current(panda_current):
# From white/grey panda schematic
return (3.3 - (panda_current * 3.3 / 4096)) / 8.25
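# Illustrative numbers (assuming a 12-bit ADC on a 3.3 V reference, per the
# /4096 and 3.3 factors above): a raw reading of 2048 maps to
# (3.3 - 1.65) / 8.25 = 0.2 A, and a reading of 0 maps to 3.3 / 8.25 =
# 0.4 A, which is why draws of 400 mA and above saturate the measurement.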
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Floor the stored capacity at 10% of nominal so a stale low value doesn't force an immediate shutdown
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Clamp the car battery capacity estimate to [0, max] and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (pandaState.pandaState.pandaType in [log.PandaState.PandaType.whitePanda, log.PandaState.PandaType.greyPanda]) and (pandaState.pandaState.current > 1):
# If white/grey panda, use the integrated current measurements if the measurement is not 0
# If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda
# This seems to be accurate to about 5%
current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(pandaState.pandaState.current))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
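# Reading the flag algebra above: any |= line can request a pause (offroad
# longer than MAX_TIME_OFFROAD_S, car voltage under VBATT_PAUSE_CHARGING,
# or the battery model drained); the &= lines veto it while ignition is on
# or DisablePowerDown is set; ForcePowerDown re-asserts it unconditionally
# because it is applied last.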
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen
return should_shutdown
|
pystress.py | __version__ = '0.2.1'
from multiprocessing import Process, active_children, cpu_count, Pipe
import os
import signal
import sys
import time
FIB_N = 100
DEFAULT_TIME = 60
try:
DEFAULT_CPU = cpu_count()
except NotImplementedError:
DEFAULT_CPU = 1
def loop(conn):
proc_info = os.getpid()
conn.send(proc_info)
conn.close()
while True:
fib(FIB_N)
def fib(n):
if n < 2:
return 1
else:
return fib(n - 1) + fib(n - 2)
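# Note on the workload: with FIB_N = 100 the naive double recursion above
# needs on the order of 10**20 calls, so a single fib(FIB_N) effectively
# never returns and the while-loop in loop() is just a safety net; any
# sufficiently large n gives the same pure-CPU, no-I/O burn.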
def sigint_handler(signum, frame):
procs = active_children()
for p in procs:
p.terminate()
os._exit(1)
signal.signal(signal.SIGINT, sigint_handler)
def get_args():
exec_time = DEFAULT_TIME
proc_num = DEFAULT_CPU
if len(sys.argv) > 3:
raise ValueError("too many arguments")
if len(sys.argv) == 2:
exec_time = int(sys.argv[1])
if len(sys.argv) == 3:
exec_time = int(sys.argv[1])
proc_num = int(sys.argv[2])
return exec_time, proc_num
def _main():
try:
exec_time, proc_num = get_args()
except ValueError:
msg = "Usage: pystress [exec_time] [proc_num]\n"
sys.stderr.write(msg)
sys.exit(1)
procs = []
conns = []
for i in range(proc_num):
parent_conn, child_conn = Pipe()
p = Process(target=loop, args=(child_conn,))
p.start()
procs.append(p)
conns.append(parent_conn)
for conn in conns:
try:
print(conn.recv())
except EOFError:
continue
time.sleep(exec_time)
for p in procs:
p.terminate()
if __name__ == "__main__":
_main()
|
profiling.py | import json
import logging
import os
import socket
import threading
import tensorflow as tf
from tensorflow.python.client import timeline
if tf.__version__[0] == '2':
logging.info('Adjusting for tensorflow 2.0')
tf = tf.compat.v1
tf.disable_eager_execution()
class Timeliner:
_timeline_dict = None
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
def update_timeline(self, chrome_trace):
# convert chrome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for later runs, merge in only the timing events, not the definitions
else:
for event in chrome_trace_dict['traceEvents']:
# timing events carry a 'ts' (timestamp) field; merge only those
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
os.makedirs(os.path.dirname(f_name), exist_ok=True)
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
def add_run(self, run_metadata=None):
if run_metadata is None:
run_metadata = self.run_metadata
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
self.update_timeline(chrome_trace)
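# Hedged usage sketch (sess, fetches and feed are hypothetical): how
# Timeliner is typically threaded through TF1 Session.run calls.
def _demo_timeliner(sess, fetches, feed, n_steps=3, out='/tmp/timeline.json'):
    tl = Timeliner()
    for _ in range(n_steps):
        sess.run(fetches, feed_dict=feed,
                 options=tl.options, run_metadata=tl.run_metadata)
        tl.add_run()  # merge this step's chrome trace into the rolling dict
    tl.save(out)  # inspect in chrome://tracing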
def launch_tensorboard(log_dir, same_process=False, port=6006):
if port is None:
port = 6006
if same_process:
from tensorboard import main as tb
tf.flags.FLAGS.logdir = log_dir
tf.flags.FLAGS.reload_interval = 1
tf.flags.FLAGS.port = port
threading.Thread(target=tb.main).start()
else:
def run_tb():
os.system('tensorboard --logdir=%s --port=%d' % (log_dir,port))
threading.Thread(target=run_tb).start()
try:
import phi.local.hostname
host = phi.local.hostname.hostname
except (ImportError, AttributeError):
host = 'localhost' # socket.gethostname()
url = "http://%s:%d/" % (host,port)
return url
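# Hedged usage note: launch_tensorboard('/tmp/tb_logs') shells out to the
# `tensorboard` CLI in a background thread and returns a best-guess URL such
# as 'http://localhost:6006/'; same_process=True instead drives TensorBoard
# via tf.flags, which only works on versions that still expose those flags.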
|
test_selenium.py | import re
import threading
import time
import unittest
from selenium import webdriver
from app import create_app, db
from app.models import Role, User, Post
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
# start Chrome
try:
cls.client = webdriver.Chrome()
except Exception:
pass
# skip these tests if the browser could not be started
if cls.client:
# create the application
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
# create the database and populate with some fake data
db.create_all()
Role.insert_roles()
User.generate_fake(10)
Post.generate_fake(10)
# add an administrator user
admin_role = Role.query.filter_by(permissions=0xff).first()
admin = User(email='john@example.com',
username='john', password='cat',
role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
# start the Flask server in a thread
threading.Thread(target=cls.app.run).start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# stop the flask server and the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
# destroy database
db.drop_all()
db.session.remove()
# remove application context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
# navigate to home page
self.client.get('http://localhost:5000/')
self.assertTrue(re.search(r'Hello,\s+Stranger!',
self.client.page_source))
# navigate to login page
self.client.find_element_by_link_text('Log In').click()
self.assertTrue('<h1>Login</h1>' in self.client.page_source)
# login
self.client.find_element_by_name('email').\
send_keys('john@example.com')
self.client.find_element_by_name('password').send_keys('cat')
self.client.find_element_by_name('submit').click()
self.assertTrue(re.search(r'Hello,\s+john!', self.client.page_source))
# navigate to the user's profile page
self.client.find_element_by_link_text('Profile').click()
self.assertTrue('<h1>john</h1>' in self.client.page_source)
|
regrtest.py | #! /usr/bin/env python3
"""
Usage:
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
--timeout TIMEOUT
-- dump the traceback and exit if a test takes more
than TIMEOUT seconds; disabled if TIMEOUT is negative
or equals to zero
--wait -- wait for user input, e.g., allow a debugger to be attached
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- display test output on failure
-d/--debug -- print traceback for failed tests
-q/--quiet -- no output unless one or more tests fail
-o/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
--testdir DIR
-- execute test files in the specified directory (instead
of the Python stdlib test suite)
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-n/--nowindows -- suppress error message boxes on Windows
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide a
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, a process that tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import faulthandler
import getopt
import io
import json
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
# Sometimes __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutizes them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to manually absolutize the __file__ and __path__ of
# the packages to prevent later imports from failing when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048 KiB (1024*2048 bytes, as below).
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(msg):
print(msg, file=sys.stderr)
print("Use --help for usage", file=sys.stderr)
sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsoS:rf:lu:t:TD:NLR:FdwWM:nj:Gm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'coverage', 'slaveargs=', 'forever', 'debug',
'start=', 'nowindows', 'header', 'testdir=', 'timeout=', 'wait',
'failfast', 'match='])
except getopt.error as msg:
usage(msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
debug = False
start = None
timeout = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-S', '--start'):
start = a
elif o in ('-s', '--single'):
single = True
elif o in ('-o', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
randomize = True
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
match_tests = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
# CWD is replaced with a temporary dir before calling main(), so we
# need to join it with the saved CWD so it goes where the user expects.
coverdir = os.path.join(support.SAVEDCWD, a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
if use_mp <= 0:
try:
import multiprocessing
# Use all cores + extras for tests that like to sleep
use_mp = 2 + multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
use_mp = 3
if use_mp == 1:
use_mp = None
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
elif o == '--testdir':
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
testdir = os.path.join(support.SAVEDCWD, a)
elif o == '--timeout':
if hasattr(faulthandler, 'dump_traceback_later'):
timeout = float(a)
if timeout <= 0:
timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_traceback_later")
timeout = None
elif o == '--wait':
input("Press any key to continue...")
else:
print(("No handler for option {}. Please report this as a bug "
"at http://bugs.python.org.").format(o), file=sys.stderr)
sys.exit(1)
if single and fromfile:
usage("-s and -f don't go together!")
if use_mp and trace:
usage("-T and -j don't go together!")
if use_mp and findleaks:
usage("-l and -j don't go together!")
if use_mp and support.max_memuse:
usage("-M and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(support.SAVEDCWD, fromfile))
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single or tests or args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if testdir:
alltests = findtests(testdir, list(), set())
else:
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
selected = selected[:1]
try:
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if start:
try:
del selected[:selected.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.seed(random_seed)
print("Using random seed", random_seed)
random.shuffle(selected)
if trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
pending = MultiprocessTests(tests)
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
debug=debug, output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
)
# -E is needed by some tests, e.g. test_import
# Running the child from the same working directory ensures
# that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, we don't exit with a status
# based on the return value from main.
tracer.runctx('runtest(test, verbose, quiet, timeout=timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, debug,
output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
verbose = True
ok = runtest(test, True, quiet, huntrleaks, debug, timeout=timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
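# Hedged usage sketch: every -j worker thread pulls from one shared
# instance, e.g. pending = MultiprocessTests(iter(['test_os', 'test_sys']));
# each next(pending) is serialized by the lock, and setting
# pending.interrupted makes all workers raise StopIteration on their next
# request.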
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
import atexit
stdout = sys.stdout
sys.stdout = open(stdout.fileno(), 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
def runtest(test, verbose, quiet,
huntrleaks=False, debug=False, use_resources=None,
output_on_failure=False, failfast=False, match_tests=None,
timeout=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
test_times -- a list of (time, test_name) pairs
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
output_on_failure -- if true, display test output on failure
timeout -- dump the traceback and exit if a test takes more than
timeout seconds
Returns one of the test result constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
if use_resources is not None:
support.use_resources = use_resources
use_timeout = (timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(timeout, exit=True)
try:
support.match_tests = match_tests
if failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance for all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(test, verbose, quiet, huntrleaks,
debug, display_failure=False)
if result[0] == FAILED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = verbose # Tell tests to be moderately quiet
result = runtest_inner(test, verbose, quiet, huntrleaks, debug,
display_failure=not verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
cleanup_test_droppings(test, verbose)
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
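# For example (illustrative only; 'spam.ham' is not a real resource here),
# tracking a hypothetical module attribute would look like:
#
#     def get_spam_ham(self):
#         return spam.ham
#     def restore_spam_ham(self, saved_ham):
#         spam.ham = saved_ham
#
# with 'spam.ham' added to the resources tuple below.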
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'shutil.archive_formats', 'shutil.unpack_formats',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions', 'threading._dangling',
'multiprocessing.process._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'support.TESTFN',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore.socket_map.copy() if asyncore else {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
# we could call shutil.get_archive_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_support_TESTFN(self):
if os.path.isfile(support.TESTFN):
result = 'f'
elif os.path.isdir(support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(support.TESTFN):
os.unlink(support.TESTFN)
elif os.path.isdir(support.TESTFN):
shutil.rmtree(support.TESTFN)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print("Warning -- {} was modified by {}".format(
name, self.testname),
file=sys.stderr)
if self.verbose > 1:
print(" Before: {}\n After: {} ".format(
original, current),
file=sys.stderr)
return False
def runtest_inner(test, verbose, quiet,
huntrleaks=False, debug=False, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
tests = unittest.TestLoader().loadTestsFromModule(the_module)
test_runner = lambda: support.run_unittest(tests)
test_runner()
if huntrleaks:
refleak = dash_R(the_module, test, test_runner,
huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr)
else:
print("test", test, "failed", file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
except:
msg = traceback.format_exc()
print("test", test, "crashed --", msg, file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if hasattr(os, 'chmod'):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
del sys.modules[the_module.__name__]
exec('import ' + the_module.__name__)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print(file=sys.stderr)
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print(msg, file=sys.stderr)
sys.stderr.flush()
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
return True
return False
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, collections.abc
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
x = [chr(i) for i in range(256)]
# int cache
x = list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = (
('win32',
"""
test__locale
test_crypt
test_curses
test_dbm
test_devpoll
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
"""),
('linux',
"""
test_curses
test_devpoll
test_largefile
test_kqueue
test_ossaudiodev
"""),
('unixware',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('openunix',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('sco_sv',
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
"""),
('darwin',
"""
test__locale
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_gdb
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
"""),
('sunos',
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
"""),
('hp-ux',
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
"""),
('cygwin',
"""
test_curses
test_dbm
test_devpoll
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
"""),
('os2emx',
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
"""),
('freebsd',
"""
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
"""),
('aix',
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
"""),
('openbsd',
"""
test_ctypes
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
('netbsd',
"""
test_ctypes
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
)
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
expected = None
for item in _expectations:
if sys.platform.startswith(item[0]):
expected = item[1]
break
if expected is not None:
self.expected = set(expected.split())
# These are broken tests, for now skipped on every platform.
# XXX Fix these!
self.expected.add('test_nis')
# expected to be skipped on every platform, even Linux
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
# doctest, profile and cProfile tests fail when the codec for the
# fs encoding isn't built in because PyUnicode_Decode() adds two
# calls into Python.
encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
if sys.getfilesystemencoding().lower() not in encs:
self.expected.add('test_profile')
self.expected.add('test_cProfile')
self.expected.add('test_doctest')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = {"test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"}
self.expected |= WIN_ONLY
if sys.platform != 'sunos5':
self.expected.add('test_nis')
if support.python_is_optimized():
self.expected.add("test_gdb")
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
def _make_temp_dir_for_build(TEMPDIR):
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using the command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
return TEMPDIR, TESTCWD
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
TEMPDIR, TESTCWD = _make_temp_dir_for_build(TEMPDIR)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(TESTCWD, quiet=True):
main()
|
MergeManager.py | import subprocess
import threading
class MergeManager(object):
def __init__(self, video_format, audio_format, file_helper):
self.video_format = video_format
self.audio_format = audio_format
self.file_helper = file_helper
self.thread = None
def merge(self, file_name):
self.thread = threading.Thread(target=self.mergeDelegate, args=[file_name])
self.thread.start()
def mergeDelegate(self, file_name):
input_video_file_name = f"tmp_{file_name}.{self.video_format}"
input_audio_file_name = f"tmp_{file_name}.{self.audio_format}"
print(f"Merging {input_video_file_name} and {input_audio_file_name} into {file_name}.{self.video_format}...")
cmd = f"ffmpeg -ac 2 -channel_layout stereo -i {input_audio_file_name} -i {input_video_file_name} -pix_fmt yuv420p {file_name}.{self.video_format}"
subprocess.call(cmd, shell=True)
print("Successfully merged")
self.file_helper.deleteFile(input_video_file_name)
self.file_helper.deleteFile(input_audio_file_name)
pass
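# Example usage (a sketch: 'file_helper' is assumed to expose deleteFile(name),
# and the tmp_<name>.<ext> inputs are assumed to already exist):
#
#     manager = MergeManager("mp4", "m4a", file_helper)
#     manager.merge("video1")   # runs ffmpeg in a background thread
#     manager.thread.join()     # optionally wait for the merge to finish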
|
with_progress.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
from urllib.request import urlretrieve
from threading import Thread
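# Note on the hook contract: urlretrieve calls
# reporthook(block_number, block_size, total_size) after each chunk read;
# total_size comes from the Content-Length header and may be -1 if unknown.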
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 100.0 / totalsize
if percent > 100:
percent = 100
readsofar = totalsize
s = "\r%5.1f%% %*d / %d" % (percent, len(str(totalsize)), readsofar, totalsize)
sys.stdout.write(s)
if readsofar >= totalsize: # Near the end
sys.stdout.write("\n")
# Total size is unknown
else:
sys.stdout.write(f"read {readsofar}\n")
def download(url: str, file_name: str = None, as_thread=False, callback_func=None) -> str:
if as_thread:
# Note: in threaded mode this function returns None immediately; the
# downloaded file name is delivered through callback_func instead.
def run(url, file_name, reporthook, callback_func):
local_file_name, _ = urlretrieve(url, file_name, reporthook=reporthook)
if callable(callback_func):
callback_func(local_file_name)
return local_file_name
thread = Thread(target=run, args=(url, file_name, reporthook, callback_func))
thread.start()
else:
return urlretrieve(url, file_name, reporthook=reporthook)[0]
if __name__ == '__main__':
URL = 'https://codeload.github.com/gil9red/SimplePyScripts/zip/master'
print(download(URL))
print()
print(download(URL, 'SimplePyScripts.zip'))
print('\n')
sys.stderr.write('Threading...\n')
print(download(URL, 'SimplePyScripts.zip', as_thread=True))
def callback_func(file_name: str):
print('File name:', file_name)
print(download(URL, 'SimplePyScripts.zip', as_thread=True, callback_func=callback_func))
|
process_limiter.py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 12 14:29:33 2020
@author: Daniel
"""
import multiprocessing as mp
import time as t
def p_limit(processes, p_max):
"""
Run the given processes, allowing at most p_max to run at one time.
Parameters
----------
processes : list
List of processes.
p_max : int
The amount of processes that can be run at one time.
Returns
-------
None.
"""
wait_list = processes
running_list = []
if len(wait_list) < p_max:
p_max = len(wait_list)
remove_list = []
for i in range(p_max):
proc = wait_list[i]
proc.start()
running_list.append(proc)
remove_list.append(proc)
for i in remove_list:
wait_list.remove(i)
while len(wait_list) != 0 or len(running_list) != 0:
remove_list = []
for i in running_list:
if not i.is_alive():
i.join()
remove_list.append(i)
for i in remove_list:
running_list.remove(i)
# Only start another process once a running slot is free, so that at
# most p_max processes ever run concurrently.
if len(wait_list) == 0 or len(running_list) >= p_max:
continue
proc = wait_list.pop(0)
proc.start()
running_list.append(proc)
def p_limit_procdef(p_num, p_max, func, params, kwparams):
"""
Create and run auto-generated processes, allowing at most p_max at one time.
Parameters
----------
p_num : int
The total number of processes.
p_max : int
The amount of processes that can be run at one time.
func : function
The function to be ran.
params : list
A list of tuples. The tuples are the parameter lists for each process.
Length must be p_num long.
kwparams : list
A list of dictionaries. The kwargs for func. Length must be p_num long.
Returns
-------
None.
"""
assert len(params) == p_num, "params must be the same length as p_num"
assert len(kwparams) == p_num, "kwparams must be the same length as p_num"
wait_list = []
running_list = []
for i in range(p_num):
proc = mp.Process(name="mp_tools_limit_" + str(i), target=func, args=params[i], kwargs=kwparams[i])
wait_list.append(proc)
if len(wait_list) < p_max:
p_max = len(wait_list)
remove_list = []
for i in range(p_max):
proc = wait_list[i]
proc.start()
running_list.append(proc)
remove_list.append(proc)
for i in remove_list:
wait_list.remove(i)
# 'or' keeps the loop draining running processes even after the wait list
# is empty.
while len(wait_list) != 0 or len(running_list) != 0:
remove_list = []
for i in running_list:
if not i.is_alive():
#print("Process finished")
i.join()
remove_list.append(i)
for i in remove_list:
running_list.remove(i)
if len(wait_list) == 0 or len(running_list) >= p_max:
continue
proc = wait_list.pop(0)
proc.start()
running_list.append(proc)
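# Illustrative call (the argument tuples/dicts are made up for the example):
#
#     p_limit_procdef(4, 2, test, [("a",), ("b",), ("c",), ("d",)],
#                     [{}, {"rr": "b"}, {}, {}])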
def test(ee, rr="a"):
t.sleep(5)
print(ee + rr)
if __name__ == "__main__":
proc1 = mp.Process(target=test, args=["asdf"])
proc2 = mp.Process(target=test, args=["asdff"])
proc3 = mp.Process(target=test, args=["asdf"])
proc4 = mp.Process(target=test, args=["asdff"])
p_limit([proc1, proc2, proc3, proc4], 3)
|
streamGL.py | import numpy
import cv2
from threading import Thread
import sys
import time
if sys.version_info >= (3, 0):
from queue import Queue
else:
from Queue import Queue
class FStream:
def __init__(self, path):
self.stream = cv2.VideoCapture(path)
self.stopped = False
self.Q = Queue(256)
def start(self):
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
while True:
if self.stopped:
return
if not self.Q.full():
(ret, frame) = self.stream.read()
if not ret:
self.stop()
return
self.Q.put(frame)
def read(self):
return self.Q.get()
def more(self):
return self.Q.qsize() > 0
def stop(self):
self.stopped = True
class DStream:
def __init__(self, win):
self.win = win
self.stopped = False
self.Q = Queue(128)
def start(self):
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
while True:
if self.stopped:
return
if self.more():
frame = self.read()
cv2.imshow(self.win, frame)
if cv2.waitKey(1) == 27:  # 27 == ESC; quit on Escape
sys.exit(1)
def put(self, frame):
if not self.Q.full():
self.Q.put(frame)
def read(self):
return self.Q.get()
def more(self):
return self.Q.qsize() > 0
def stop(self):
self.stopped = True
fsv = FStream("1mb2.mp4").start()
dsv = DStream("video").start()
time.sleep(1.0)
while True:
frame = fsv.read()
start = time.time()
for i in range(0, 100000): pass  # busy loop standing in for the main per-frame processing
print("%.5f"%(time.time()-start))
dsv.put(frame)
if not fsv.more() and fsv.stopped:
break
|
add_no_lock.py | #!/usr/bin/python
from multiprocessing import Process, Value
# Incrementing a shared Value without holding its lock: the read-modify-write
# races across processes, so the final output is usually less than 200.
def add(val):
val.value += 1
def main():
val = Value('i', 0)
processes = [Process(target=add, args=(val, )) for _ in range(200)]
for p in processes:
p.start()
for p in processes:
p.join()
print(f'Value is {val.value}')
if __name__ == "__main__":
main()
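# For contrast, a corrected sketch: the synchronized wrapper returned by
# Value('i', 0) exposes get_lock(), and holding it around the increment
# makes the final count exactly 200:
#
#     def add_locked(val):
#         with val.get_lock():
#             val.value += 1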
|
client-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import os
import json
import hashlib
import threading
import curses
import sys
from datetime import datetime
from signal import SIGTERM
from re import match as re_match
from argparse import ArgumentParser
from getpass import getpass
from random import randint as random_int
from time import sleep
class Argparser(ArgumentParser):
def error(self, message):
self.print_help()
sys.stderr.write(f"\nError: {message}\n")
sys.exit(2)
class Layout:
TITLE_ROWS = 1
PROMPT_ROWS = 1
def __init__(self):
self.rows, self.cols = Layout.terminal_size()
# Calculate dimensions of each window
self.title_rows = Layout.TITLE_ROWS
self.title_cols = self.cols
self.title_start_row = 0
self.title_start_col = 0
self.history_rows = self.rows - Layout.TITLE_ROWS - Layout.PROMPT_ROWS
self.history_cols = self.cols
self.history_start_row = 1
self.history_start_col = 0
self.prompt_rows = Layout.PROMPT_ROWS
self.prompt_cols = self.cols
self.prompt_start_row = self.rows - 1
self.prompt_start_col = 0
@staticmethod
def terminal_size():
rows, cols = os.popen('stty size', 'r').read().split()
return int(rows), int(cols)
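# Note: 'stty size' works only on POSIX terminals; shutil.get_terminal_size()
# would be a portable alternative, but the stty approach is kept as written.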
class Title:
def __init__(self, layout, title, screen):
self.window = curses.newwin(layout.title_rows, layout.title_cols,
layout.title_start_row, layout.title_start_col)
start_col = (layout.title_cols - len(title)) // 2
self.window.addstr(0, start_col, title, curses.A_BOLD)
def redraw(self):
self.window.refresh()
class History:
def __init__(self, layout, screen):
self.messages = []
self.layout = layout
self.screen = screen
self.window = curses.newwin(layout.history_rows, layout.history_cols,
layout.history_start_row, layout.history_start_col)
# Because of border, the number of visible rows/cols is fewer
self.visible_rows = self.layout.history_rows - 2
self.visible_cols = self.layout.history_cols - 2
def append(self, msg):
self.messages.append(msg)
def redraw(self):
self.window.clear()
self.window.border(0)
# Draw the last messages, count - number of visible rows
row = 1
for msg in self.messages[-self.visible_rows:]:
self.window.move(row, 1)
self.window.addstr(msg)
row += 1
self.window.refresh()
class Prompt:
def __init__(self, layout, screen):
self.layout = layout
self.screen = screen
self.window = curses.newwin(layout.prompt_rows, layout.prompt_cols,
layout.prompt_start_row, layout.prompt_start_col)
self.window.keypad(True)
self.window.addstr('> ')
def getchar(self):
return self.window.getch()
def getstr(self):
return self.window.getstr()
def redraw(self):
self.window.refresh()
def reset(self, text='> '):
self.window.clear()
self.window.addstr(text)
self.redraw()
class FpublicityCLI:
running = False
def __init__(self):
self.layout = Layout()
self.screen = None
self.confirm_pop_up = False
def _start_curses(self):
if FpublicityCLI.running:
raise Exception("Curses is already running")
self.screen = curses.initscr()
curses.cbreak()
self.screen.keypad(True)
FpublicityCLI.running = True
def _stop_curses(self):
if not FpublicityCLI.running:
raise Exception("Curses is not running")
curses.nocbreak()
self.screen.keypad(False)
self.screen = None
curses.endwin()
FpublicityCLI.running = False
def redraw(self):
self.screen.refresh()
self.history.redraw()
self.title.redraw()
self.prompt.redraw()
# def save_chat():
# with open('room_' + str(room_id) + '_' + str(int(datetime.now().timestamp())) + '.txt', 'w') as f:
# f.write('\n'.join([i for i in chat_history.get(0, END)])) # self.history
# messagebox.showinfo(title="Saved", message="Successfully saved chat!")
def chat_listener(self):
while True:
try:
data = rcv_cmd(sock)
if not data:
sleep(.1)
continue
data = json.loads(data)
if data['cmd'] == 'join_request':
self.confirm_pop_up = True
self.prompt.reset('User <' + data['args'][0] + '> wants to join. Accept (y/yes)? ')
elif data['cmd'] == 'inbox':
message = bytearray.fromhex(data['args'][1])
message = get_decode(message, chat_key).decode('cp1251')
msg_text = datetime.now().strftime("%H:%M") + ' <' + data['args'][0] + '>: ' + message
self.history.append(msg_text)
elif data['cmd'] == 'popup':
self.history.append('Info from server: ' + data['args'])
else:
self.history.append(datetime.now().strftime("%H:%M") + ' <' + data['args'][0] + '>: ' + data['args'][1])
self.redraw()
except:
sleep(.1)
def start(self):
try:
# Start curses and initialize all curses-based objects
self._start_curses()
self.title = Title(self.layout, "fpublicity cli v0.1.0", self.screen)
self.history = History(self.layout, self.screen)
self.prompt = Prompt(self.layout, self.screen)
self.redraw()
threading.Thread(target=self.chat_listener).start()
# Run the main loop
while True:
message = self.prompt.getstr().decode('cp1251')
if self.confirm_pop_up:
snd_cmd(sock, message)
self.confirm_pop_up = False
self.prompt.reset()
continue
clear_input = False
if not message.strip():
continue
if message[0] == '/':
first_space = message.find(' ')
message = [message[:first_space], message[first_space + 1:]] if first_space != -1 else [message, '']
commands = ('/change_room_key', '/kick', '/delete_room', '/info', '/change_password', '/sound')
if message[0] in commands:
clear_input = True
if message[0] == '/change_room_key':
file_path = message[1]
try:
new_hash = hashlib.md5(getkey(file_path)).hexdigest()
snd_cmd(sock, jsonfy_request('set_room_key', (new_hash,)))
except FileNotFoundError:
self.history.append("Error: File " + file_path + " is not found!")
self.redraw()
if message[0] == '/exit':
self.stop()
os.kill(os.getpid(), SIGTERM)
if message[0] == '/kick':
snd_cmd(sock, jsonfy_request('kick_user', (message[1],)))
if message[0] == '/delete_room':
snd_cmd(sock, jsonfy_request('delete_room', ("",)))
if message[0] == '/info':
snd_cmd(sock, jsonfy_request('room_info', ("",)))
if message[0] == '/change_password':
if not message[1]:
self.history.append("Password cannot be empty!")
self.redraw()
else:
snd_cmd(sock, jsonfy_request('change_password', (hash_md5(message[1]),)))
else:
if key_hash() == '0':
self.prompt.reset()
self.history.append("You need to set room key first. Use /change_room_key <file name>")
self.redraw()
continue
message = message.replace('\n', ' ')
try:
message = bytearray(message.encode('cp1251'))
except:
self.history.append("Cannot encode some characters. Use cp1251 only")
self.redraw()
else:
encoded_msg = get_encode(message, chat_key)
if not encoded_msg:
self.history.append("Message is too long. Type something shorter")
self.redraw()
else:
encoded_msg = ''.join([format(x, '02x') for x in encoded_msg])
snd_cmd(sock, jsonfy_request('broadcast', (encoded_msg,)))
clear_input = True
if clear_input:
self.prompt.reset()
# Ignore keyboard interrupts and exit cleanly
except:
self.stop()
os.kill(os.getpid(), SIGTERM)
def stop(self):
self._stop_curses()
def parse_args():
argparser = Argparser()
argparser.add_argument('-i', '--ip', help="Server IP", required=True, action="store")
argparser.add_argument('-p', '--port', help="Server Port", type=int, required=True, action="store")
argparser.add_argument('-k', '--key', help="Chat key file", action="store")
return argparser.parse_args()
def get_message_hash(message_bytes, bytes_len):
res = bytearray(bytes_len)
for i in range(len(message_bytes)):
res[i % bytes_len] = res[i % bytes_len] ^ message_bytes[i]
return res
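# Layout of the 256-byte packet built by get_encode below (reconstructed from
# the code as a reading aid):
#   bytes 0-2 : key offset key_pos, little-endian, sent in the clear
#   byte  3   : payload length (at most 248)
#   bytes 4.. : random padding, then the payload, then more random padding
#   last 4    : 4-byte XOR hash of the packet, computed while the hash field
#               is still zeroed
# Everything from byte 3 onward is XORed with the key, starting at key index
# (key_pos + 3) % len(key).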
def get_encode(message_bytes, key):
if len(message_bytes) > 248:
return
key_pos = random_int(0, len(key))
res = bytearray(256)
res[0] = key_pos & 255
res[1] = (key_pos >> 8) & 255
res[2] = (key_pos >> 16) & 255
res[3] = len(message_bytes)
curpos = 4
trash_size = (248 - len(message_bytes)) // 2
for i in range(curpos, curpos + trash_size - 1 + ((248 - len(message_bytes)) % 2) + 1):
res[i] = random_int(0, 255)
curpos += trash_size + ((248 - len(message_bytes)) % 2)
res[curpos:curpos + len(message_bytes)] = message_bytes[:]
curpos += len(message_bytes)
for i in range(curpos, curpos + trash_size):
res[i] = random_int(0, 255)
curpos += trash_size
msg_hash = get_message_hash(res, 4)
res[curpos:curpos + len(msg_hash)] = msg_hash[:]
for i in range(3, len(res)):
res[i] = res[i] ^ key[(key_pos + i) % len(key)]
return res
def get_decode(message_bytes, key):
if len(message_bytes) != 256:
return
decode_msg = bytearray(256)
key_pos = message_bytes[0] | (message_bytes[1] << 8) | (message_bytes[2] << 16)
decode_msg[:] = message_bytes[:]
for i in range(3, len(decode_msg)):
decode_msg[i] = decode_msg[i] ^ key[(key_pos + i) % len(key)]
res = bytearray(decode_msg[3])
res[:] = decode_msg[4 + ((248 - len(res)) // 2) + ((248 - len(res)) % 2):4 + ((248 - len(res)) // 2) + ((248 - len(res)) % 2) + len(res)]
decode_hash = bytearray(4)
decode_hash[:4] = decode_msg[-4:]
decode_msg[-4:] = bytearray(4)[:]
msg_hash = get_message_hash(decode_msg, 4)
if msg_hash != decode_hash:
return
return res
def genkey(bytes_size, file_path):
key = bytearray([random_int(0, 255) for _ in range(bytes_size)])
with open(file_path, 'wb') as f:
f.write(key)
print(f"Info: Generated key. New key file: ({file_path})")
def getkey(file_path):
with open(file_path, 'rb') as f:
key = f.read()
return key
args = parse_args()
ipv4, port = args.ip, args.port
chat_key = ''
if args.key:
try:
chat_key = getkey(args.key)
except FileNotFoundError:
exit('KeyFile "' + args.key + '" was not found')
if not re_match(r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b", ipv4):
exit("Given IP is invalid")
if port not in range(1, 65536):
exit("Given port is invalid")
def rcv_cmd(connection):
data = bytearray()
while True:
try:
chunk = connection.recv(1)
if chunk == b'\n':
if data in (b"Password has been changed successfully!", ):
print('Info: ' + data.decode('utf-8'))
data = bytearray()  # reset so the notice is not glued to the next message
continue
return data.decode('utf-8')
if not chunk:
print("No connection: Connection has been dropped by server")
os.kill(os.getpid(), SIGTERM)
data += chunk
if len(data) > 1536:
return
except:
print("No connection: Connection has been dropped by server")
os.kill(os.getpid(), SIGTERM)
def wait_rcv_cmd(connection):
while True:
data = rcv_cmd(connection)
if not data:
continue
return data
def snd_cmd(connection, cmd):
try:
connection.send((cmd + '\n').encode('utf-8'))
except:
print("No connection: Connection has been dropped by server")
os.kill(os.getpid(), SIGTERM)
def jsonfy_request(call_func, args):
res = {
"call_func": call_func,
"args": args
}
return json.dumps(res)
def hash_md5(data):
return hashlib.md5(data.encode('utf-8')).hexdigest()
def key_hash():
return '0' if not chat_key else hashlib.md5(chat_key).hexdigest()
def create_room_window():
def create_room(room_name, username, password):
snd_cmd(sock, jsonfy_request('create_room', (str(room_name), str(username), str(hash_md5(password)))))
print("Your Room id: " + str(wait_rcv_cmd(sock)))
os.kill(os.getpid(), SIGTERM)
username = input('Username: ')
password = getpass('Password: ')
room_name = input('Room name: ')
create_room(room_name, username, password)
def window_chat(room_id):
fpublicity_cli = FpublicityCLI()
fpublicity_cli.start()
def enter_room_window():
def login_room(room_id, username, password):
snd_cmd(sock, jsonfy_request('login_room', (room_id, username,)))
salt = wait_rcv_cmd(sock)
snd_cmd(sock, hash_md5(key_hash() + salt + hash_md5(password)))
login_response = str(wait_rcv_cmd(sock))
if login_response != 'Logged in':
print("Error: " + login_response)
os.kill(os.getpid(), SIGTERM)
window_chat(room_id)
room_id = input('Room id: ')
username = input('Username: ')
password = getpass('Password: ')
login_room(room_id, username, password)
def register_room_window():
def registrate_room(room_id, username, password):
snd_cmd(sock,
jsonfy_request('registrate_room', (str(room_id), key_hash(), str(username), str(hash_md5(password)))))
reg_response = str(wait_rcv_cmd(sock))
print("Info: " + reg_response)
if reg_response == "Wait until admin will accept you":
print("Info: " + str(wait_rcv_cmd(sock)))
os.kill(os.getpid(), SIGTERM)
room_id = input('Room id: ')
username = input('Username: ')
password = getpass('Password: ')
registrate_room(room_id, username, password)
def generate_key_window():
genkey(1024 ** 2, 'key' + str(random_int(1000000, 9999999)) + '.bin')
def help_window():
help_text = '''Fpublicity dev. start date: 29.11.2020
Generate Key:
Generates a megabyte key file that you have to keep.
Please, share this key privately only with room members.
Create Room:
To create a private room you have to enter a Room name and an
Admin username and password. When the room is created
you will get a room id that you must save.
Enter Room:
You have to be a member of the room if you want to enter.
You will get "Invalid credentials" error if you have right
username and password, but wrong room key.
Register Room:
To join the room first you need to have the same room key.
Then enter the room id of the room you want to join. Choose
username and password and wait until admin accepts you.
Example:
Imagine, Alice and Bob want to chat privately:
*Alice and Bob need the fpublicity server ip and port*
*They need to start program with --ip <ip> --port <port>*
Alice: Create Room
Room name => Bob_and_Alice
Username => a1is3
Password => ********
Server Response: room id = 123123
*Alice needs to save this room id*
Alice: Generate Key
Key file name: key5711053.bin
Alice: Enter Room
Room id => 123123
Username => a1is3
Password => ********
Alice: /change_room_key key5711053.bin
*Alice restarts the program with --key key5711053.bin argument*
*Alice shares the key and room id with Bob*
Alice: Enter Room
*Bob starts the program with --key key5711053.bin argument*
Bob: Register Room
Room id => 123123
Username => b0b
Password => **********
*Alice accepts Bob*
Bob: Enter Room
Room id => 123123
Username => b0b
Password => **********
*Alice and Bob start to chat privately*
*Other users also can join the room just as Bob did*
WARNING:
If you are entering the room you just created you need to
start the program with no --key argument. Then update the room
key.
Developers:
N0n3-github, x64BitWorm'''
print(help_text)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10.0)
try:
sock.connect((ipv4, port))
except (ConnectionRefusedError, socket.timeout):
print("Error: Could not connect to Server " + str(ipv4) + ':' + str(port))
os.kill(os.getpid(), SIGTERM)
sock.settimeout(None)
print('Actions:\n1. Create Room\n2. Enter Room\n3. Register Room\n4. Generate Key\n5. Help')
try:
choice = int(input('Choose action: '))
if choice not in range(1, 6):
print('Error: Invalid action chosen')
sys.exit(2)
functions = [create_room_window, enter_room_window, register_room_window, generate_key_window, help_window]
functions[choice - 1]()
except:
os.kill(os.getpid(), SIGTERM)
|
socks2http.py | import asyncore
import socket
import socks
import traceback
from threading import Thread
from Queue import Queue
VERSION = 'socks2http/0.01'
HTTPVER = 'HTTP/1.1'
SOCKS_SERVER_ADDR = '127.0.0.1'
SOCKS_SERVER_PORT = 8087
conn_q = Queue()
def connection_worker():
while True:
c = conn_q.get()
try:
c.connect_target(c.host)
if c.init_req:
c.socks.addbuf(c.init_req)
else:
c.addbuf(HTTPVER + ' 200 Connection established\n' +
'Proxy-agent: %s\n\n' % VERSION)
except Exception as e:
traceback.print_exc()
conn_q.task_done()
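# connection_worker drains conn_q: each queued handler has its blocking SOCKS
# connect performed off the asyncore event loop, after which either the
# buffered initial request or a '200 Connection established' reply is queued
# for sending.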
class SocksProxyHandler(asyncore.dispatcher):
def __init__(self, sock, src):
asyncore.dispatcher.__init__(self, sock)
self.sock = sock
self.src = src
self.sendbuf = ''
def handle_read(self):
data = self.recv(8192)
if len(data) == 0:
self.handle_close()
return None
try:
self.src.addbuf(data)
except Exception as e:
traceback.print_exc()
self.close()
def handle_close(self):
self.close()
if len(self.src.sendbuf) > 0:
self.src.send(self.src.sendbuf)
self.src.close()
def addbuf(self, data):
self.sendbuf += data
def handle_write(self):
sent = self.send(self.sendbuf)
self.sendbuf = self.sendbuf[sent:]
def writable(self):
return (len(self.sendbuf) > 0)
class HTTPProxyHandler(asyncore.dispatcher):
def __init__(self, sock):
asyncore.dispatcher.__init__(self, sock)
self.connected = False
self.client_buffer = ''
self.sock = sock
self.target = socks.socksocket()
self.socks = None
self.sendbuf = ''
def connect_target(self, host):
i = host.find(':')
if i != -1:
port = int(host[i+1:])
host = host[:i]
else:
port = 80
(soc_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]
self.target.setproxy(socks.PROXY_TYPE_SOCKS5, SOCKS_SERVER_ADDR, SOCKS_SERVER_PORT)
self.target.connect(address)
self.socks = SocksProxyHandler(self.target, self)
def handle_close(self):
self.close()
if self.socks:
if len(self.socks.sendbuf) > 0:
self.socks.send(self.socks.sendbuf)
self.socks.close()
def addbuf(self, data):
self.sendbuf += data
def handle_write(self):
sent = self.send(self.sendbuf)
self.sendbuf = self.sendbuf[sent:]
def writable(self):
return (len(self.sendbuf) > 0)
def handle_read(self):
if not self.connected:
self.client_buffer += self.recv(8192)
end = self.client_buffer.find('\n')
if end != -1:
print('%s' % self.client_buffer[:end])
method, path, protocol = (self.client_buffer[:end+1]).split()
self.client_buffer = self.client_buffer[end+1:]
if method == 'CONNECT':
self.host = path
self.init_req = None
conn_q.put(self)
elif method in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'TRACE'):
path = path[7:]
i = path.find('/')
host = path[:i]
path = path[i:]
self.host = host
self.init_req = '%s %s %s\n'%(method, path, protocol)+ \
self.client_buffer
conn_q.put(self)
else:
print("HTTPProxy protocol error")
self.close()
self.client_buffer = ''
self.connected = True
return None
data = self.recv(8192)
if len(data) == 0:
self.handle_close()
return None
try:
self.socks.addbuf(data)
except Exception as e:
traceback.print_exc()
self.close()
class HTTPProxyServer(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
print "serving on http://%s:%d" %(host, port)
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
handler = HTTPProxyHandler(sock)
# This thread pool performs the SOCKS connects, which only support a
# blocking connect() and so cannot run inside the asyncore loop.
for i in range(3):
t = Thread(target=connection_worker)
t.daemon = True
t.start()
server = HTTPProxyServer('localhost', 8080)
asyncore.loop(timeout=0.05)
|
miniterm.py | #!/home/pi/mycroft-core/.venv/bin/python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
raw_input = input # Python 3 renamed raw_input() to input()
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
pass
def cleanup(self):
pass
def getkey(self):
return None
def write_bytes(self, s):
self.byte_output.write(s)
self.byte_output.flush()
def write(self, s):
self.output.write(s)
self.output.flush()
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt':
import msvcrt
import ctypes
class Out(object):
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # function keys, ignore
msvcrt.getwch()
else:
return z
elif os.name == 'posix':
import atexit
import termios
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update({
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update({
32: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for t in text:
if ' ' <= t < '\x7f' or t in '\r\n\b\t':
r.append(t)
elif t < ' ':
r.append(unichr(0x2400 + ord(t)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(t)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
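# Filters compose: tx applies [EOL transform] + filters in order, and rx
# applies the same chain in reverse (see Miniterm.update_transformations
# below). For example, filters=('colorize', 'debug') logs rx text before it
# is colorized.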
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
#~ sys.stderr.write('--- %-20s %s [%s]\n' % (port, desc, hwid))
sys.stderr.write('--- {:2}: {:20} {}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def update_transformations(self):
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f] for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, this can happen if no modem state notification has
# been received yet; ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
#~ sys.stderr.write('--- data escaping: %s linefeed: %s\n' % (
#~ REPR_MODES[self.repr_mode],
#~ LF_MODES[self.convert_outgoing]))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
# XXX would be nice if the writer could be interrupted at this
# point... to exit completely
raise
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} ---\n'.format(key_description(c)))
def get_help_text(self):
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(
version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override settings when calling main() from another
# script, e.g. to create a miniterm-my-device.py
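# A minimal wrapper sketch (hypothetical file name and port; assumes this module
# is importable as serial.tools.miniterm):
#
#     from serial.tools.miniterm import main
#     main(default_port='/dev/ttyUSB0', default_baudrate=115200)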
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d # GS/CTRL+]
)
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14 # Menu: CTRL+T
)
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
timeout=1,
do_not_open=True)
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08'),
))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
is_bst.py | #!/usr/bin/env python3
"""Check whether binary search tree
Test whether a binary search tree data structure is implemented correctly.
Given a binary search tree with integers as its key, test whether it is a
correct binary search tree. Check whether the given binary tree structure
satisfies the following condition:
For any node x: every key in its left subtree must be strictly less than x's
key, and every key in its right subtree must be strictly greater than x's key.
"""
import sys
import threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**25) # new thread will get stack of such size
def checkNode(tree, index, left, right):
"""Recursive validation for given node of binary search tree
"""
if not left < tree[index][0] < right:
return False
if tree[index][1] > -1:
if not checkNode(tree, tree[index][1], left, tree[index][0]):
return False
if tree[index][2] > -1:
if not checkNode(tree, tree[index][2], tree[index][0], right):
return False
return True
def IsBST(tree):
"""validation for given tree for BST
"""
key = tree[0][0]
bound = pow(2, 31)
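# Sentinel bounds: keys are assumed to fit in the signed 32-bit integer range.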
if tree[0][1] > -1:
if not checkNode(tree, tree[0][1], -bound, key):
return False
if tree[0][2] > -1:
if not checkNode(tree, tree[0][2], key, bound - 1):
return False
return True
def main():
nodes = int(sys.stdin.readline().strip())
tree = []
for i in range(nodes):
tree.append(list(map(int, sys.stdin.readline().strip().split())))
if len(tree) < 2:
print("CORRECT")
else:
if IsBST(tree):
print("CORRECT")
else:
print("INCORRECT")
threading.Thread(target=main).start()
|
camera.py | from datetime import datetime
from picamera import PiCamera
from pathlib import Path
from PyQt5.QtCore import pyqtSignal, QObject
from threading import Lock, Thread
from src.cameratimerbackend import RepeatedTimer
class FileNameHelper():
def __init__(self):
# set default save directory, make folder if does not exist
self.savedir = '/home/pi/DaisyLiteGUI/images/'
Path(self.savedir).mkdir(parents=True, exist_ok=True)
# set default name and file format
self.NamePrefix = 'Im'
self.DateStamp = True
self.TimeStamp = True
self.FileFormat = 'jpeg'
self.filenamehelper(self.NamePrefix, self.DateStamp, self.TimeStamp, self.FileFormat)
# for custom file names, add default format programmatically
self.custombool = False
self.customname = 'Im_day{timestamp:%Y%m%d}_time{timestamp:%H-%M-%S-%f}'
# jpg format settings
self.bayerInclude = True
self.JPGquality = 100
def filenameswitcher(self):
# for turning custom name on/off
if self.custombool:
self.filename_unformat = self.customname + '.' + self.FileFormat
elif not self.custombool:
self.filenamehelper(self.NamePrefix, self.DateStamp, self.TimeStamp, self.FileFormat)
def filenamehelper(self, prefix, Date, Time, Fformat):
# init file name
filename_unformat = prefix
# add date and time stamp according to preference
if Date:
filename_unformat = filename_unformat + '_day{timestamp:%Y%m%d}'
if Time:
filename_unformat = filename_unformat + '_time{timestamp:%H-%M-%S-%f}'
# add file format
self.filename_unformat = filename_unformat + '.' + Fformat
def filenameSetPrefix(self, Prefix_in):
# update date stamp status and name accordingly
self.NamePrefix = Prefix_in
self.filenamehelper(self.NamePrefix, self.DateStamp, self.TimeStamp, self.FileFormat)
def filenameSetFormat(self, Fformat_in):
# update file format and name accordingly
self.FileFormat = Fformat_in
self.filenamehelper(self.NamePrefix, self.DateStamp, self.TimeStamp, self.FileFormat)
def filenameSetDate(self, DateBool_in):
# update date stamp status and name accordingly
self.DateStamp = DateBool_in
self.filenamehelper(self.NamePrefix, self.DateStamp, self.TimeStamp, self.FileFormat)
def filenameSetTime(self, TimeBool_in):
# update time stamp status and name accordingly
self.TimeStamp = TimeBool_in
self.filenamehelper(self.NamePrefix, self.DateStamp, self.TimeStamp, self.FileFormat)
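# Usage sketch (assuming the defaults above): with DateStamp and TimeStamp on,
# filename_unformat is 'Im_day{timestamp:%Y%m%d}_time{timestamp:%H-%M-%S-%f}.jpeg',
# which Camera.capture() later fills in via .format(timestamp=datetime.now()).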
class CallBackEmitter(QObject):
# timer finished signal
timer_finished_signal = pyqtSignal()
def __init__(self):
super(CallBackEmitter, self).__init__()
class Camera(PiCamera):
def __init__(self):
super(Camera, self).__init__()
# set up camera hardware variables
self.initvar_camerahardware()
# set up timed thread variables (make each variable into list for multiple cameras)
self.initvar_cameratimer()
# preview state sentinel
self.preview_state = False
# get filename object
self.fn = FileNameHelper()
# lock held while the camera's still port is in use
self.piclock = Lock()
# get callback emitter instance
self.callbackemitter = CallBackEmitter()
def initvar_camerahardware(self):
# set default resolution
self.resolution = (320, 240)
# turn off de-noiser for still and video images
self.image_denoise = False
self.video_denoise = False
# ensure saturation turned off
self.saturation = 0
# auto-white balance, starts auto
self.awb_mode = 'auto'
def initvar_cameratimer(self):
# every n seconds
self.everyN = 0
# for n seconds
self.forN = 0
# take n pictures
self.takeN = 0
# with spacing n
self.withgapN = 0
def capture(self):
with self.piclock:
# format filename with date/time stamp values if appropriate
filename = self.fn.savedir + self.fn.filename_unformat.format(timestamp=datetime.now())
# use parent method to capture, *bayer and quality only used for JPG formats*
super(Camera, self).capture(filename, format=self.fn.FileFormat, use_video_port=False, bayer=self.fn.bayerInclude, quality=self.fn.JPGquality)
def capture_as_thread(self):
capthread = Thread(target=self.capture)
capthread.start()
def start_timed_capture(self):
# special case for only 1 picture
if self.takeN == 1:
# init main time
self.maintimer = RepeatedTimer(self.everyN, self.capture, timelimit = self.forN, callback = self.callbackemitter.timer_finished_signal.emit)
else:
# init camera capture (short time scale) timer
self.cameratimer = RepeatedTimer(self.withgapN, self.capture, countlimit = self.takeN)
# init longer time scale timer
self.maintimer = RepeatedTimer(self.everyN, self.cameratimer.start_all, timelimit = self.forN, callback = self.callbackemitter.timer_finished_signal.emit)
# get thread and start
self.timedcapturethread = Thread(target = self.maintimer.start_all)
self.timedcapturethread.start()
def stop_timed_capture(self):
# stop timed capture, timer may not be running so have to try/except
try:
self.maintimer.stop()
except AttributeError:
pass
try:
self.cameratimer.stop()
except AttributeError:
pass
self.timedcapturethread.join()
print('Timer thread successfully stopped.')
|
test_docxmlrpc.py | from DocXMLRPCServer import DocXMLRPCServer
import httplib
import sys
from test import test_support
threading = test_support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because the
# server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
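# Usage sketch: decorating a test method with
# @make_request_and_skipIf(condition, "reason") consumes the pending request
# before skipping, so the single-request server below is not left hanging.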
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = httplib.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
test_support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn('<dl><dt><a name="-<lambda>"><strong>'
'<lambda></strong></a>(x, y)</dt></dl>',
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
'<tt>Add two instances together. This '
'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
'PEP008</a>, but has nothing<br>\nto do '
'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
'RFC1952</a>. Case should matter: pEp008 '
'and rFC1952. Things<br>\nthat start '
'with http and ftp should be '
'auto-linked, too:<br>\n<a href="http://google.com">'
'http://google.com</a>.</tt></dd></dl>'), response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-system.listMethods"><strong>system.listMethods'
'</strong></a>()</dt><dd><tt><a href="#-system.listMethods">system'
'.listMethods</a>() => [\'add\', \'subtract\','
' \'multiple\']<br>\n <br>\nReturns a list'
' of the methods supported by the'
' server.</tt></dd></dl>\n <dl><dt><a name="-system.methodHelp">'
'<strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt>'
'<a href="#-system.methodHelp">system.methodHelp</a>(\'add\') '
'=> "Adds two integers together"<br>\n '
'<br>\nReturns a string containing documentation'
' for the specified method.</tt></dd></dl>\n '
'<dl><dt><a name="-system.methodSignature"><strong>system.'
'methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-'
'system.methodSignature">system.methodSignature</a>(\'add\') '
'=> [double, int, int]<br>\n <br>\nReturns'
' a list describing the signature of'
' the method. In the<br>\nabove example,'
' the add method takes two integers'
' as arguments<br>\nand returns a double'
' result.<br>\n <br>\nThis server does '
'NOT support system.methodSignature.</tt></dd></dl>'),
response.read())
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn("""Try self.<strong>add</strong>, too.""",
response.read())
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
__init__.py | # Copyright 2021, Battelle Energy Alliance, LLC
# Python Packages
import os
import logging
import json
import time
import environs
from flask import Flask, request, Response, json
import deep_lynx
import threading
# Repository Modules
from .deep_lynx_query import query_deep_lynx
from .deep_lynx_import import import_to_deep_lynx
from .ml_adapter import main
import utils
# Global variables
api_client = None
lock_ = threading.Lock()
threads = list()
number_of_events = 1
env = environs.Env()
new_data = False
# configure logging. to overwrite the log file for each run, add option: filemode='w'
logging.basicConfig(filename='MLAdapter.log',
level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s',
filemode='w',
datefmt='%m/%d/%Y %H:%M:%S')
print('Application started. Logging to file MLAdapter.log')
def create_app():
""" This file and aplication is the entry point for the `flask run` command """
global number_of_events
global env
app = Flask(os.getenv('FLASK_APP'), instance_relative_config=True)
# Validate .env file exists
utils.validate_paths_exist(".env")
# Check required variables in the .env file, and raise error if not set
env = environs.Env()
env.read_env()
env.url("DEEP_LYNX_URL")
env.str("CONTAINER_NAME")
env.str("DATA_SOURCE_NAME")
env.list("DATA_SOURCES")
env.int("IMPORT_FILE_WAIT_SECONDS")
env.int("REGISTER_WAIT_SECONDS")
env.path("QUERY_FILE_NAME")
env.path("IMPORT_FILE_NAME")
env.path("ML_ADAPTER_OBJECT_LOCATION")
env.path("METADATA")
env.path("QUEUE_FILE_NAME")
env.int("QUEUE_LENGTH")
env.list("ML_ADAPTER_OBJECTS")
split = json.loads(os.getenv("SPLIT"))
if not isinstance(split, dict):
error = "must be dict, not {0}".format(type(split))
raise TypeError(error)
# Purpose to run flask once (not twice)
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
# Instantiate deep_lynx
container_id, data_source_id, api_client = deep_lynx_init()
os.environ["CONTAINER_ID"] = container_id
os.environ["DATA_SOURCE_ID"] = data_source_id
# Register for events to listen for
register_for_event(api_client)
# Create Thread object that runs the machine learning algorithms
# Thread object: activity that is run in a separate thread of control
# Daemon: a process that runs in the background. A daemon thread will shut down immediately when the program exits.
ml_thread = threading.Thread(target=main, daemon=True, name="ml_thread")
print("Created ml_thread")
threads.append(ml_thread)
# Start the thread’s activity
ml_thread.start()
# File clean up
if os.path.exists(os.getenv("QUEUE_FILE_NAME")):
os.remove(os.getenv("QUEUE_FILE_NAME"))
if os.path.exists(os.getenv("ML_ADAPTER_OBJECT_LOCATION")):
f = open(os.getenv("ML_ADAPTER_OBJECT_LOCATION"))
ml_adapter_object = json.load(f)
f.close()
if os.path.exists(ml_adapter_object["MODEL"]["output_file"]):
os.remove(ml_adapter_object["MODEL"]["output_file"])
if os.path.exists(ml_adapter_object["DATASET"]):
os.remove(ml_adapter_object["DATASET"])
os.remove(os.getenv("ML_ADAPTER_OBJECT_LOCATION"))
if os.path.exists("data/training_set.csv"):
os.remove("data/training_set.csv")
if os.path.exists("data/testing_set.csv"):
os.remove("data/testing_set.csv")
if os.path.exists("data/X_train.csv"):
os.remove("data/X_train.csv")
if os.path.exists("data/X_test.csv"):
os.remove("data/X_test.csv")
if os.path.exists("data/y_train.csv"):
os.remove("data/y_train.csv")
if os.path.exists("data/y_test.csv"):
os.remove("data/y_test.csv")
@app.route('/machinelearning', methods=['POST'])
def events():
global number_of_events
global new_data  # declared global so the flag set below updates the module-level value
if 'application/json' not in request.content_type:
logging.warning('Received request with unsupported content type')
return Response('Unsupported Content Type. Please use application/json', status=400)
# Data from graph has been received
data = request.get_json()
try:
file_id = data["query"]["fileID"]
logging.info('Received event with data: ' + json.dumps(data))
except KeyError:
# The incoming payload doesn't have what we need, but still return a 200
return Response(response=json.dumps({'received': True}), status=200, mimetype='application/json')
# Retrieves file from Deep Lynx
name = "event_thread_" + str(number_of_events)
# Thread object: activity that is run in a separate thread of control
event_thread = threading.Thread(target=query_deep_lynx, args=(file_id, ), name=name)
print("Created ", name)
threads.append(event_thread)
number_of_events += 1
# Start the thread’s activity
event_thread.start()
# Join: Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates.
event_thread.join()
with lock_:
new_data = True
print(name, " is done")
return Response(response=json.dumps({'received': True}), status=200, mimetype='application/json')
return app
def register_for_event(api_client: deep_lynx.ApiClient, iterations=30):
"""
Register with Deep Lynx to receive data_ingested events on applicable data sources
Args
api_client (deep_lynx.ApiClient): deep lynx api client
iterations (integer): the number of iterations to try registering for events
"""
registered = False
# List of adapters to receive events from
data_ingested_adapters = json.loads(os.getenv("DATA_SOURCES"))
# Register events for listening from other data sources
while not registered and iterations > 0:
# Get a list of data sources and validate that no error occurred
datasource_api = deep_lynx.DataSourcesApi(api_client)
data_sources = datasource_api.list_data_sources(os.getenv("CONTAINER_ID"))
if not data_sources.is_error and len(data_sources.value) > 0:
#data_sources = data_sources.to_dict()["value"]
for data_source in data_sources.value:
# If the data source is found, create a registered event
if data_source.name in data_ingested_adapters:
events_api = deep_lynx.EventsApi(api_client)
# verify that this event action does not already exist
# by comparing to the established event action we would like to create
event_action = deep_lynx.CreateEventActionRequest(
data_source.container_id, data_source.id, "file_created", "send_data", None, "http://" +
os.getenv('FLASK_RUN_HOST') + ":" + os.getenv('FLASK_RUN_PORT') + "/machinelearning",
os.getenv("DATA_SOURCE_ID"), True)
actions = events_api.list_event_actions()
for action in actions.value:
# if destination, event_type, and data_source_id match, we know that this
# event action already exists
if action.destination == event_action.destination and action.event_type == event_action.event_type \
and action.data_source_id == event_action.data_source_id:
# this exact event action already exists, remove data source from list
logging.info('Event action on ' + data_source.name + ' already exists')
data_ingested_adapters.remove(data_source.name)
# continue event action creation if the same was not already found
if data_source.name in data_ingested_adapters:
create_action_result = events_api.create_event_action(event_action)
if create_action_result.is_error:
logging.warning('Error creating event action: ' + create_action_result.error)
else:
logging.info('Successful creation of event action on ' + data_source.name + ' datasource')
data_ingested_adapters.remove(data_source.name)
# If all events are registered
if len(data_ingested_adapters) == 0:
registered = True
logging.info('Successful registration on all adapters')
return registered
# If the desired data source and container is not found, repeat
logging.info(
f'Datasource(s) {", ".join(data_ingested_adapters)} not found. Next event registration attempt in {os.getenv("REGISTER_WAIT_SECONDS")} seconds.'
)
time.sleep(float(os.getenv('REGISTER_WAIT_SECONDS')))
iterations -= 1
return registered
def deep_lynx_init():
"""
Returns the container id, data source id, and api client for use with the DeepLynx SDK.
Assumes token authentication.
Args
None
Return
container_id (str), data_source_id (str), api_client (ApiClient)
"""
# initialize an ApiClient for use with deep_lynx APIs
configuration = deep_lynx.configuration.Configuration()
configuration.host = os.getenv('DEEP_LYNX_URL')
api_client = deep_lynx.ApiClient(configuration)
# perform API token authentication only if values are provided
if os.getenv('DEEP_LYNX_API_KEY') != '' and os.getenv('DEEP_LYNX_API_KEY') is not None:
# authenticate via an API key and secret
auth_api = deep_lynx.AuthenticationApi(api_client)
try:
token = auth_api.retrieve_o_auth_token(x_api_key=os.getenv('DEEP_LYNX_API_KEY'),
x_api_secret=os.getenv('DEEP_LYNX_API_SECRET'),
x_api_expiry='12h')
except TypeError:
print("ERROR: Cannot connect to DeepLynx.")
logging.error("Cannot connect to DeepLynx.")
return '', '', None
# update header
api_client.set_default_header('Authorization', 'Bearer {}'.format(token))
# get container ID
container_id = None
container_api = deep_lynx.ContainersApi(api_client)
containers = container_api.list_containers()
for container in containers.value:
if container.name == os.getenv('CONTAINER_NAME'):
container_id = container.id
break
if container_id is None:
print('Container not found')
return None, None, None
# get data source ID, create if necessary
data_source_id = None
datasources_api = deep_lynx.DataSourcesApi(api_client)
datasources = datasources_api.list_data_sources(container_id)
for datasource in datasources.value:
if datasource.name == os.getenv('DATA_SOURCE_NAME'):
data_source_id = datasource.id
if data_source_id is None:
datasource = datasources_api.create_data_source(
deep_lynx.CreateDataSourceRequest(os.getenv('DATA_SOURCE_NAME'), 'standard', True), container_id)
data_source_id = datasource.value.id
return container_id, data_source_id, api_client
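# Usage sketch (assumes a populated .env file providing DEEP_LYNX_URL,
# CONTAINER_NAME, DATA_SOURCE_NAME, etc. has already been loaded):
#   container_id, data_source_id, client = deep_lynx_init()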
|
stacoan.py | #!/bin/python
import codecs
import hashlib
import os
import sys
import webbrowser
import configparser
import argparse
import threading
import json
import multiprocessing
from threading import Thread
from multiprocessing import Process
from time import time
from helpers.logger import Logger
from helpers.project import Project
from helpers.report_html import Report_html
from helpers.searchwords import SearchLists
from helpers.server import ServerWrapper
def parse_args():
# Description
argument_width_in_help = 30
parser = argparse.ArgumentParser(description='StaCoAn is a cross-platform tool '
'which aids developers, bugbounty hunters and ethical hackers performing static '
'code analysis on mobile applications.',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=argument_width_in_help))
# Arguments: see https://docs.python.org/3/library/argparse.html
parser.add_argument('-p', metavar="PATH", dest='project', required=False, nargs='+',
help='Relative path to the project')
parser.add_argument('--disable-browser', action='store_true', required=False,
help='Do not automatically open the HTML report in a browser')
parser.add_argument('--disable-server', action='store_true', required=False,
help='Do not run the server to drag and drop files to be analysed')
log_group = parser.add_mutually_exclusive_group(required=False)
log_group.add_argument('--log-all', action='store_true', help='Log all errors, warnings and info messages (default)')
log_group.add_argument('--log-errors', action='store_true', help='Log only errors')
log_group.add_argument('--log-warnings', action='store_true', help='Log only errors and warning messages')
# Check if the right parameters are set
args = parser.parse_args()
if args.disable_server and args.project is None:
parser.error("--disable-server requires the input file (application file) specified with -p")
if args.project:
args.disable_server = True
# return our args; usage: args.argname
return args
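# Example invocation (hypothetical file name):
#   python stacoan.py -p app.apk --log-errors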
# Note that this server(args) function CANNOT be placed in the server.py file. It calls "program()", which cannot be
# called from the server.py file
def server(args, server_disabled, DRAG_DROP_SERVER_PORT):
# Multiprocessing differs between Linux and Windows (fork vs. a new instance without the parent's context and args)
child=False
if os.name == 'nt':
if os.path.exists(".temp_thread_file"):
with open(".temp_thread_file") as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
args.project = [content[0]]
args.disable_server = content[1]
args.log_warnings = content[2]
args.disable_browser = content[3]
child = True
os.remove(".temp_thread_file")
else:
if os.path.exists(".temp_thread_file"):
child = True
os.remove(".temp_thread_file")
if (not(server_disabled or args.disable_server) or ((not len(sys.argv) > 1))) and (not child):
# This is a "bridge" between the stacoan program and the server. It communicates via this pipe (queue)
def serverlistener(in_q):
while True:
# Get some data
data = in_q.get()
if data == "KILLSERVERCOMMAND":
t1.isAlive = False
download_thread.isAlive = False
Logger("Server reports killed", Logger.INFO)
Logger("Exiting program! Bye. ", Logger.INFO)
exit(0)
# Process the data
args = argparse.Namespace(project=[data], disable_server=False, log_warnings=False, log_errors=False, disable_browser=True)
# On windows: write arguments to file, spawn process, read arguments from file, delete.
if os.name == 'nt':
with open('.temp_thread_file', 'a') as the_file:
the_file.write(data+"\n")
the_file.write("False\n") # disable_server
the_file.write("False\n") # log_warnings
the_file.write("True\n")
else:
with open('.temp_thread_file', 'a') as the_file:
the_file.write("filling")
p = Process(target=program, args=(args,))
p.start()
# Create report server instance
reportserver = ServerWrapper.create_reportserver()
download_thread = threading.Thread(target=reportserver.serve_forever)
download_thread.daemon = True
download_thread.start()
# Create the shared queue and launch both threads
t1 = Thread(target=serverlistener, args=(ServerWrapper.dragdropserver.q,))
t1.daemon = True
t1.start()
dragdropserver = ServerWrapper.create_drag_drop_server()
drag_drop_server_thread = threading.Thread(target=dragdropserver.serve_forever)
drag_drop_server_thread.daemon = True
drag_drop_server_thread.start()
if (not args.disable_browser) and not (args.disable_server or server_disabled):
# Open the webbrowser to the generated start page.
report_folder_start = "http:///127.0.0.1:" + str(DRAG_DROP_SERVER_PORT)
webbrowser.open(report_folder_start)
# Keep waiting until q is gone.
ServerWrapper.dragdropserver.q.join()
drag_drop_server_thread.join()
return() # Not needed because it will be killed eventually.
def program(args):
# Script cannot be called outside script directory. It contains a lot of os.getcwd().
if not os.path.dirname(os.path.abspath(__file__)) == os.getcwd():
Logger("Script cannot be called outside directory", Logger.ERROR)
# Keep track of execution time
start_time = time()
# Read information from config file
# Todo edit dockerfile with new path for report
# ToDo create a settings class that parses the ini file with set and get functions
config = configparser.ConfigParser()
config.read("config.ini")
server_disabled = config.getboolean("ProgramConfig", 'server_disabled')
DRAG_DROP_SERVER_PORT = json.loads(config.get("Server", 'drag_drop_server_port'))
# Update log level
if not (args.log_warnings or args.log_errors):
loglevel = 3
else:
loglevel = 1 if args.log_errors else 2
config.set('ProgramConfig', 'loglevel', str(loglevel))
with open("config.ini", "w+") as configfile:
config.write(configfile)
# Import the searchwords lists
# Searchwords.searchwords_import(Searchwords())
SearchLists()
# Server(args) checks if the server should be run and handles the spawning of the server and control of it
if not args.project:
server(args, server_disabled, DRAG_DROP_SERVER_PORT)
# For each project (read .ipa or .apk file), run the scripts.
all_project_paths = args.project
if not all_project_paths:
sys.exit(0)
for project_path in all_project_paths:
try:
Project.projects[project_path] = Project(project_path)
except:
sys.exit(0)
report_folder = os.path.join(Project.projects[project_path].name, config.get("ProgramConfig", 'report_folder'))
report_folder_start = os.path.join(os.getcwd(), report_folder, "start.html")
Logger("Decompiling app...")
Project.projects[project_path].app_prepper()
Logger("Decompiling done.")
Logger("Searching trough files")
Project.projects[project_path].searchcontroller()
Logger("Searching done.")
Logger("start generating report")
# ToDo: Generate the tree-view + Source code view for each SOURCE file
all_files = dict()
all_files.update(Project.projects[project_path].db_files)
all_files.update(Project.projects[project_path].src_files)
amount_files = len(all_files)
for i, file in enumerate(all_files):
Logger("progress: "+str(format((i/amount_files)*100, '.2f'))+"%", rewriteLine=True)
hash_object = hashlib.md5(file.encode('utf-8'))
file_report_file = os.path.join(report_folder, hash_object.hexdigest()+'.html')
overview_html = Report_html(Project.projects[project_path])
overview_html.header("tree")
overview_html.navigation()
overview_html.tree_view(Project.projects[project_path], file)
overview_html.footer()
f = codecs.open(file_report_file, 'w', encoding='utf8')
f.write(overview_html.gethtml())
# with open(file_report_file, 'w') as f:
# print(overview_html.gethtml(), file=f)
Logger("progress: 100% ")
# Generate the startpage
file_report_file = os.path.join(report_folder, 'start.html')
overview_html = Report_html(Project.projects[project_path])
overview_html.header("tree")
overview_html.navigation()
overview_html.tree_view(Project.projects[project_path], "")
overview_html.footer()
f = codecs.open(file_report_file, 'w', encoding='utf8')
f.write(overview_html.gethtml())
# with open(file_report_file, 'w') as f:
# print(overview_html.gethtml(), file=f)
# Generate words overview html file
words_overview_html_report_file = os.path.join(report_folder, "wordlist_overview.html")
words_overview_html = Report_html(Project.projects[project_path])
words_overview_html.header("words_overview")
words_overview_html.navigation()
words_overview_html.html_wordlist(Project.projects[project_path])
words_overview_html.footer()
with open(words_overview_html_report_file, 'w', encoding="utf-8") as f:
print(words_overview_html.gethtml(), file=f)
# Generate lootbox
lootbox_html_report_file = os.path.join(report_folder, "lootbox.html")
lootbox_html_report = Report_html(Project.projects[project_path])
lootbox_html_report.header("lootbox")
lootbox_html_report.navigation()
lootbox_html_report.lootbox()
lootbox_html_report.footer()
f = codecs.open(lootbox_html_report_file, 'w', encoding='utf8')
f.write(lootbox_html_report.gethtml())
# with open(lootbox_html_report_file, 'w') as f:
# print(lootbox_html_report.gethtml(), file=f)
# Generate the treeview
tree_js_file_path = os.path.join(report_folder, "tree_js_content.js")
f = codecs.open(tree_js_file_path, 'w', encoding='utf8')
f.write(Report_html.Tree_builder.tree_js_file(Project.projects[project_path]))
# with open(tree_js_file_path, 'w') as f:
# print(Report_html.Tree_builder.tree_js_file(Project.projects[project_path]), file=f)
# Generate looty.js file, for the zip creation process at the lootbox page
Report_html().make_loot_report_content()
# Write all log-events to logfile
Logger.dump()
# Log some end results
if loglevel == 3:
print("\n--------------------\n")
Logger("Static code analyzer completed succesfully in %fs." % (time() - start_time))
Logger("HTML report is available at: %s" % report_folder_start)
if (not args.disable_browser) and (args.disable_server or server_disabled):
Logger("Now automatically opening the HTML report.")
# Open the webbrowser to the generated start page.
if sys.platform == "darwin": # check if on OSX
# build a file:/// URL from the absolute report path
# (str.strip("http:///") would strip characters, not a prefix)
report_folder_start = "file:///" + str(report_folder_start).lstrip("/")
webbrowser.open(report_folder_start)
# Exit program
sys.exit()
if __name__ == "__main__":
if os.path.exists(".temp_thread_file"):
os.remove(".temp_thread_file")
multiprocessing.freeze_support()
if os.environ.get('DEBUG') is not None:
program(parse_args())
exit(0)
try:
program(parse_args())
except Exception as e:
Logger("ERROR: Unknown error: %s." % str(e), Logger.ERROR)
|
splash.py | """Silly module mostly meant as an easter-egg."""
import threading
import time
from .. import term
from ..term import text
_banner = r'''
.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:.
) _____ _ _ )
( | _ |___ _ _ _ ___ ___ ___ _| | | |_ _ _ (
) | __| . | | | | -_| _| -_| . | | . | | | )
( |__| |___|_____|___|_| |___|___| |___|_ | (
) _____ __ |___| __ )
( /\ __`\ /\ \ __/\ \ (
) \ \ \/\ \__ __ __ ____ \ \ \ /\_\ \ \___ )
( \ \ ,__/\ \/\ \/\ \ / _ `\ \ \ \ \/\ \ \ __`\ (
) \ \ \/\ \ \_/ \_/ \ \ \/\ \ \ \ \____\ \ \ \ \/\ \ )
( \ \_\ \ \___^___/'\ \_\ \_\ \ \_____\\ \_\ \____/ (
) \/_/ \/__//__/ \/_/\/_/ \/_____/ \/_/\/___/ )
( (
.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:._.:*~*:.
'''
def splash():
"""Put this at the beginning of your exploit to create the illusion that
your sploit is enterprisey and top notch quality"""
def updater():
colors = [
text.blue, text.bold_blue,
text.magenta, text.bold_magenta,
text.red, text.bold_red,
text.yellow, text.bold_yellow,
text.green, text.bold_green,
text.cyan, text.bold_cyan,
]
def getcolor(n):
return colors[(n // 4) % len(colors)]
lines = [' ' + line + '\n' for line in _banner.strip('\n').split('\n')]
hs = [term.output('', frozen=False) for _ in range(len(lines))]
ndx = 0
import sys as _sys
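# Presumably loops until interpreter shutdown: module globals such as _sys are
# cleared on exit, which ends this daemon thread's loop instead of raising.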
while _sys:
for i, (l, h) in enumerate(zip(lines, hs)):
cur = ''
buf = ''
col = getcolor(ndx + i)
for j in range(len(l)):
buf += l[j]
ncol = getcolor(ndx + i + j)
if col != ncol:
cur += buf if buf.isspace() else col(buf)
col = ncol
buf = ''
cur += col(buf)
h.update(cur)
ndx += 1
time.sleep(0.15)
if term.term_mode:
t = threading.Thread(target=updater)
t.daemon = True
t.start()
time.sleep(0.2)
|
build_imagenet_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
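# For example, _int64_feature(7) and _int64_feature([7, 8]) both produce an
# Int64List-backed Feature; _bytes_feature wraps exactly one value.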
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
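# Usage sketch (illustrative; the helper and file path are assumptions): one
# ImageCoder instance is shared per worker thread and reused across files.
def _decode_jpeg_file_sketch(filename, coder):
  """Decode a single JPEG file from disk into an RGB numpy array via the coder."""
  image_data = tf.gfile.FastGFile(filename, 'r').read()
  return coder.decode_jpeg(image_data)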
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique thread batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of each batch to
      analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in
      this list might contain 0 or more entries, one per bounding box
      annotation for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128 and num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
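# Worked example of the shard arithmetic above (all numbers assumed): with
# num_shards=128 and num_threads=16, each thread writes 8 shards; a thread
# covering files [0, 80000) computes np.linspace(0, 80000, 9), so its shard s
# holds files [s * 10000, (s + 1) * 10000).
def _shard_ranges_sketch(start, stop, num_shards_per_batch):
  """Illustrative helper mirroring the np.linspace call above."""
  return np.linspace(start, stop, num_shards_per_batch + 1).astype(int)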
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in
      this list might contain 0 or more entries, one per bounding box
      annotation for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches with ranges [ranges[i][0], ranges[i][1]).
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
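# Illustrative helper (not in the original): the label assignment implemented
# by the loop above, as a standalone mapping; the synset values in the example
# are assumptions.
def _synset_labels_sketch(challenge_synsets):
  """Map each synset to its 1-based label; 0 stays reserved for background."""
  return {synset: index + 1 for index, synset in enumerate(challenge_synsets)}
# e.g. _synset_labels_sketch(['n01440764', 'n01443537'])
#      == {'n01440764': 1, 'n01443537': 2}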
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
    List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries, one per bounding box annotation
    for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
    with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
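# Illustrative helper (not in the original): the per-line parsing from the loop
# above, factored out; the sample line comes from the docstring.
def _parse_bbox_line_sketch(line):
  """Parse one CSV line into (filename, [xmin, ymin, xmax, ymax])."""
  parts = line.strip().split(',')
  assert len(parts) == 5, ('Failed to parse: %s' % line)
  return parts[0], [float(p) for p in parts[1:]]
# _parse_bbox_line_sketch('n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940')
# == ('n00007846_64193.JPEG', [0.006, 0.262, 0.7545, 0.994])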
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
test_json.py | # Standard library imports...
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import json
import os
import unittest
from unittest.mock import Mock
# Third-party imports...
import requests
from convert_alert import convert_sd_to_ms
from main import send_to_teams
http_port = 6000
def run_mock_server():
mock_server = HTTPServer(('localhost', http_port), MockServerRequestHandler)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True
mock_server_thread.start()
class TestJSON(unittest.TestCase):
def test_valid_input_sd_json(self):
with open("test_data/sd_valid_incident_closed.json") as json_valid, \
open('test_data/teams_valid_incident_closed.json') as json_teams:
incoming_json = json.load(json_valid)
teams_valid_json = json.load(json_teams)
teams_json = convert_sd_to_ms(incoming_json)
self.assertEqual(teams_valid_json, teams_json)
def test_cf_with_valid_sd_json(self):
with open("test_data/sd_valid_incident_closed.json") as json_valid:
incoming_json = json.load(json_valid)
run_mock_server()
req = Mock(get_json=Mock(return_value=incoming_json))
self.assertEqual(send_to_teams(req), "OK")
class MockServerRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.send_response(requests.codes.ok)
self.end_headers()
return
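# Minimal usage sketch (assumption: the code under test posts to this host and
# port; neither the path nor the payload shape is prescribed by these tests):
def _post_to_mock_server_sketch(payload):
    """POST a JSON payload to the mock server and return the status code."""
    return requests.post('http://localhost:%d/' % http_port, json=payload).status_code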
|
pantsd_integration_test.py | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import glob
import os
import signal
import threading
import time
import unittest
from textwrap import dedent
from typing import List, Optional
import psutil
import pytest
from pants.testutil.pants_integration_test import read_pants_log, setup_tmpdir, temporary_workdir
from pants.util.contextutil import environment_as, temporary_dir, temporary_file
from pants.util.dirutil import rm_rf, safe_file_dump, safe_mkdir, safe_open, safe_rmtree, touch
from pants_test.pantsd.pantsd_integration_test_base import (
PantsDaemonIntegrationTestBase,
launch_waiter,
)
def launch_file_toucher(f):
"""Launch a loop to touch the given file, and return a function to call to stop and join it."""
if not os.path.isfile(f):
raise AssertionError("Refusing to touch a non-file.")
halt = threading.Event()
def file_toucher():
        while not halt.is_set():
touch(f)
time.sleep(1)
thread = threading.Thread(target=file_toucher)
thread.daemon = True
thread.start()
def join():
halt.set()
thread.join(timeout=10)
return join
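# Usage sketch (mirrors the call in test_pantsd_filesystem_invalidation below):
#   join = launch_file_toucher("3rdparty/jvm/com/google/auto/value/BUILD")
#   ... run pants commands while the file is repeatedly touched ...
#   join()  # signals the loop to stop and waits up to 10s for the thread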
compilation_failure_dir_layout = {
os.path.join("compilation_failure", "main.py"): "if __name__ == '__main__':\n import sys¡",
os.path.join(
"compilation_failure", "BUILD"
): "python_library()\npex_binary(name='bin', entry_point='main.py')",
}
class TestPantsDaemonIntegration(PantsDaemonIntegrationTestBase):
hermetic = False
def test_pantsd_run(self):
with self.pantsd_successful_run_context(log_level="debug") as ctx:
with setup_tmpdir({"foo/BUILD": "files(sources=[])"}) as tmpdir:
ctx.runner(["list", f"{tmpdir}/foo::"])
ctx.checker.assert_started()
ctx.runner(["list", f"{tmpdir}/foo::"])
ctx.checker.assert_running()
def test_pantsd_broken_pipe(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
run = self.run_pants_with_workdir(
"help | head -1", workdir=workdir, config=pantsd_config, shell=True
)
self.assertNotIn("broken pipe", run.stderr.lower())
checker.assert_started()
def test_pantsd_pantsd_runner_doesnt_die_after_failed_run(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
# Run target that throws an exception in pants.
with setup_tmpdir(compilation_failure_dir_layout) as tmpdir:
self.run_pants_with_workdir(
["lint", os.path.join(tmpdir, "compilation_failure", "main.py")],
workdir=workdir,
config=pantsd_config,
).assert_failure()
checker.assert_started()
# Assert pantsd is in a good functional state.
self.run_pants_with_workdir(
["help"], workdir=workdir, config=pantsd_config
).assert_success()
checker.assert_running()
def test_pantsd_lifecycle_invalidation(self):
"""Run with different values of daemon=True options, which should trigger restarts."""
with self.pantsd_successful_run_context() as ctx:
last_pid = None
for idx in range(3):
# Run with a different value of a daemon=True option in each iteration.
ctx.runner([f"--pantsd-invalidation-globs=ridiculous{idx}", "help"])
next_pid = ctx.checker.assert_started()
if last_pid is not None:
self.assertNotEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_non_invalidation(self):
with self.pantsd_successful_run_context() as ctx:
cmds = (["help"], ["--no-colors", "help"], ["help"])
last_pid = None
for cmd in cmds:
# Run with a CLI flag.
ctx.runner(cmd)
next_pid = ctx.checker.assert_started()
if last_pid is not None:
self.assertEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_non_invalidation_on_config_string(self):
with temporary_dir() as dist_dir_root, temporary_dir() as config_dir:
# Create a variety of config files that change an option that does _not_ affect the
# daemon's fingerprint (only the Scheduler's), and confirm that it stays up.
config_files = [
os.path.abspath(os.path.join(config_dir, f"pants.{i}.toml")) for i in range(3)
]
for idx, config_file in enumerate(config_files):
print(f"writing {config_file}")
with open(config_file, "w") as fh:
fh.write(
f"""[GLOBAL]\npants_distdir = "{os.path.join(dist_dir_root, str(idx))}"\n"""
)
with self.pantsd_successful_run_context() as ctx:
cmds = [[f"--pants-config-files={f}", "help"] for f in config_files]
last_pid = None
for cmd in cmds:
ctx.runner(cmd)
next_pid = ctx.checker.assert_started()
if last_pid is not None:
self.assertEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_shutdown_for_broken_scheduler(self):
with self.pantsd_test_context() as (workdir, config, checker):
# Run with valid options.
self.run_pants_with_workdir(["help"], workdir=workdir, config=config).assert_success()
checker.assert_started()
# And again with invalid scheduler-fingerprinted options that trigger a re-init.
self.run_pants_with_workdir(
["--backend-packages=nonsensical", "help"], workdir=workdir, config=config
).assert_failure()
checker.assert_stopped()
def test_pantsd_aligned_output(self) -> None:
# Set for pytest output display.
self.maxDiff = None
cmds = [["help", "goals"], ["help", "targets"], ["roots"]]
config = {
"GLOBAL": {
# These must match the ones we configure in pantsd_integration_test_base.py.
"backend_packages": ["pants.backend.python", "pants.backend.python.lint.flake8"],
}
}
non_daemon_runs = [self.run_pants(cmd, config=config) for cmd in cmds]
with self.pantsd_successful_run_context() as ctx:
daemon_runs = [ctx.runner(cmd) for cmd in cmds]
ctx.checker.assert_started()
for cmd, run in zip(cmds, daemon_runs):
print(f"(cmd, run) = ({cmd}, {run.stdout}, {run.stderr})")
self.assertNotEqual(run.stdout, "", f"Empty stdout for {cmd}")
for run_pair in zip(non_daemon_runs, daemon_runs):
non_daemon_stdout = run_pair[0].stdout
daemon_stdout = run_pair[1].stdout
for line_pair in zip(non_daemon_stdout.splitlines(), daemon_stdout.splitlines()):
assert line_pair[0] == line_pair[1]
@unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7622")
def test_pantsd_filesystem_invalidation(self):
"""Runs with pantsd enabled, in a loop, while another thread invalidates files."""
with self.pantsd_successful_run_context() as ctx:
cmd = ["list", "::"]
ctx.runner(cmd)
ctx.checker.assert_started()
# Launch a separate thread to poke files in 3rdparty.
join = launch_file_toucher("3rdparty/jvm/com/google/auto/value/BUILD")
# Repeatedly re-list 3rdparty while the file is being invalidated.
for _ in range(0, 16):
ctx.runner(cmd)
ctx.checker.assert_running()
join()
def test_pantsd_client_env_var_is_inherited_by_pantsd_runner_children(self):
expected_key = "TEST_ENV_VAR_FOR_PANTSD_INTEGRATION_TEST"
expected_value = "333"
with self.pantsd_successful_run_context() as ctx:
# First, launch the daemon without any local env vars set.
ctx.runner(["help"])
ctx.checker.assert_started()
# Then, set an env var on the secondary call.
# We additionally set the `HERMETIC_ENV` env var to allow the integration test harness
# to pass this variable through.
env = {
expected_key: expected_value,
"HERMETIC_ENV": expected_key,
}
with environment_as(**env):
result = ctx.runner(
["run", "testprojects/src/python/print_env", "--", expected_key]
)
ctx.checker.assert_running()
self.assertEqual(expected_value, "".join(result.stdout).strip())
def test_pantsd_launch_env_var_is_not_inherited_by_pantsd_runner_children(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
with environment_as(NO_LEAKS="33"):
self.run_pants_with_workdir(
["help"], workdir=workdir, config=pantsd_config
).assert_success()
checker.assert_started()
self.run_pants_with_workdir(
["run", "testprojects/src/python/print_env", "--", "NO_LEAKS"],
workdir=workdir,
config=pantsd_config,
).assert_failure()
checker.assert_running()
def test_pantsd_touching_a_file_does_not_restart_daemon(self):
test_file = "testprojects/src/python/print_env/main.py"
config = {
"GLOBAL": {"pantsd_invalidation_globs": '["testprojects/src/python/print_env/*"]'}
}
with self.pantsd_successful_run_context(extra_config=config) as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
ctx.checker.assert_running()
touch(test_file)
# Permit ample time for the async file event propagate in CI.
time.sleep(10)
ctx.checker.assert_running()
def test_pantsd_invalidation_file_tracking(self):
test_dir = "testprojects/src/python/print_env"
config = {"GLOBAL": {"pantsd_invalidation_globs": f'["{test_dir}/*"]'}}
with self.pantsd_successful_run_context(extra_config=config) as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
ctx.checker.assert_running()
def full_pants_log():
return "\n".join(read_pants_log(ctx.workdir))
# Create a new file in test_dir
with temporary_file(suffix=".py", binary_mode=False, root_dir=test_dir) as temp_f:
temp_f.write("import that\n")
temp_f.close()
ctx.checker.assert_stopped()
self.assertIn("saw filesystem changes covered by invalidation globs", full_pants_log())
def test_pantsd_invalidation_pants_toml_file(self):
# Test tmp_pants_toml (--pants-config-files=$tmp_pants_toml)'s removal
tmp_pants_toml = os.path.abspath("testprojects/test_pants.toml")
# Create tmp_pants_toml file
with safe_open(tmp_pants_toml, "w") as f:
f.write("[DEFAULT]\n")
with self.pantsd_successful_run_context() as ctx:
ctx.runner([f"--pants-config-files={tmp_pants_toml}", "help"])
ctx.checker.assert_started()
time.sleep(10)
# Delete tmp_pants_toml
os.unlink(tmp_pants_toml)
ctx.checker.assert_stopped()
def test_pantsd_pid_deleted(self):
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(10)
ctx.checker.assert_running()
subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"]
safe_rmtree(subprocess_dir)
ctx.checker.assert_stopped()
def test_pantsd_pid_change(self):
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(10)
ctx.checker.assert_running()
subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"]
(pidpath,) = glob.glob(os.path.join(subprocess_dir, "*", "pantsd", "pid"))
with open(pidpath, "w") as f:
f.write("9")
ctx.checker.assert_stopped()
# Remove the pidfile so that the teardown script doesn't try to kill process 9.
os.unlink(pidpath)
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8193")
def test_pantsd_memory_usage(self):
"""Validates that after N runs, memory usage has increased by no more than X percent."""
number_of_runs = 10
max_memory_increase_fraction = 0.40 # TODO https://github.com/pantsbuild/pants/issues/7647
with self.pantsd_successful_run_context() as ctx:
# NB: This doesn't actually run against all testprojects, only those that are in the chroot,
# i.e. explicitly declared in this test file's BUILD.
cmd = ["list", "testprojects::"]
ctx.runner(cmd).assert_success()
initial_memory_usage = ctx.checker.current_memory_usage()
for _ in range(number_of_runs):
ctx.runner(cmd).assert_success()
ctx.checker.assert_running()
final_memory_usage = ctx.checker.current_memory_usage()
self.assertTrue(
initial_memory_usage <= final_memory_usage,
"Memory usage inverted unexpectedly: {} > {}".format(
initial_memory_usage, final_memory_usage
),
)
increase_fraction = (float(final_memory_usage) / initial_memory_usage) - 1.0
self.assertTrue(
increase_fraction <= max_memory_increase_fraction,
"Memory usage increased more than expected: {} -> {}: {} actual increase (expected < {})".format(
initial_memory_usage,
final_memory_usage,
increase_fraction,
max_memory_increase_fraction,
),
)
def test_pantsd_max_memory_usage(self):
"""Validates that the max_memory_usage setting is respected."""
# We set a very, very low max memory usage, which forces pantsd to restart immediately.
max_memory_usage_bytes = 130
with self.pantsd_successful_run_context() as ctx:
# TODO: We run the command, but we expect it to race pantsd shutting down, so we don't
# assert success. https://github.com/pantsbuild/pants/issues/8200 will address waiting
# until after the current command completes to invalidate the scheduler, at which point
# we can assert success here.
ctx.runner(
[f"--pantsd-max-memory-usage={max_memory_usage_bytes}", "list", "testprojects::"]
)
# Assert that a pid file is written, but that the server stops afterward.
ctx.checker.assert_started_and_stopped()
def test_pantsd_invalidation_stale_sources(self):
test_path = "daemon_correctness_test_0001"
test_build_file = os.path.join(test_path, "BUILD")
test_src_file = os.path.join(test_path, "some_file.py")
filedeps_cmd = ["--files-not-found-behavior=warn", "filedeps", test_path]
try:
with self.pantsd_successful_run_context() as ctx:
safe_mkdir(test_path, clean=True)
ctx.runner(["help"])
ctx.checker.assert_started()
safe_file_dump(
test_build_file, "python_library(sources=['some_non_existent_file.py'])"
)
non_existent_file = os.path.join(test_path, "some_non_existent_file.py")
result = ctx.runner(filedeps_cmd)
ctx.checker.assert_running()
assert non_existent_file not in result.stdout
safe_file_dump(test_build_file, "python_library(sources=['*.py'])")
result = ctx.runner(filedeps_cmd)
ctx.checker.assert_running()
assert non_existent_file not in result.stdout
safe_file_dump(test_src_file, "print('hello')\n")
result = ctx.runner(filedeps_cmd)
ctx.checker.assert_running()
assert test_src_file in result.stdout
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_parse_exception_success(self):
# This test covers the case described in #6426, where a run that is failing fast due to an
# exception can race other completing work. We expect all runs to fail due to the error
# that has been introduced, but none of them should hang.
test_path = "testprojects/3rdparty/this_is_definitely_not_a_valid_directory"
test_build_file = os.path.join(test_path, "BUILD")
invalid_symbol = "this_is_definitely_not_a_valid_symbol"
try:
safe_mkdir(test_path, clean=True)
safe_file_dump(test_build_file, f"{invalid_symbol}()")
for _ in range(3):
with self.pantsd_run_context(success=False) as ctx:
result = ctx.runner(["list", "testprojects::"])
ctx.checker.assert_started()
self.assertIn(invalid_symbol, result.stderr)
finally:
rm_rf(test_path)
def _assert_pantsd_keyboardinterrupt_signal(
self, signum: int, regexps: Optional[List[str]] = None
):
"""Send a signal to the thin pailgun client and observe the error messaging.
:param signum: The signal to send.
:param regexps: Assert that all of these regexps match somewhere in stderr.
"""
with self.pantsd_test_context() as (workdir, config, checker):
client_handle, waiter_process_pid, _ = launch_waiter(workdir=workdir, config=config)
client_pid = client_handle.process.pid
waiter_process = psutil.Process(waiter_process_pid)
assert waiter_process.is_running()
checker.assert_started()
# This should kill the client, which will cancel the run on the server, which will
# kill the waiting process.
os.kill(client_pid, signum)
client_run = client_handle.join()
client_run.assert_failure()
for regexp in regexps or []:
self.assertRegex(client_run.stderr, regexp)
# pantsd should still be running, but the waiter process should have been killed.
time.sleep(5)
assert not waiter_process.is_running()
checker.assert_running()
def test_pantsd_sigint(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGINT,
regexps=["Interrupted by user."],
)
def test_sigint_kills_request_waiting_for_lock(self):
"""Test that, when a pailgun request is blocked waiting for another one to end, sending
SIGINT to the blocked run will kill it."""
config = {"GLOBAL": {"pantsd_timeout_when_multiple_invocations": -1, "level": "debug"}}
with self.pantsd_test_context(extra_config=config) as (workdir, config, checker):
# Run a process that will wait forever.
first_run_handle, _, file_to_create = launch_waiter(workdir=workdir, config=config)
checker.assert_started()
checker.assert_running()
# And another that will block on the first.
blocking_run_handle = self.run_pants_with_workdir_without_waiting(
command=["goals"], workdir=workdir, config=config
)
# Block until the second request is waiting for the lock.
time.sleep(10)
# Sends SIGINT to the run that is waiting.
blocking_run_client_pid = blocking_run_handle.process.pid
os.kill(blocking_run_client_pid, signal.SIGINT)
blocking_run_handle.join()
# Check that pantsd is still serving the other request.
checker.assert_running()
# Exit the second run by writing the file it is waiting for, and confirm that it
# exited, and that pantsd is still running.
safe_file_dump(file_to_create, "content!")
result = first_run_handle.join()
result.assert_success()
checker.assert_running()
def test_pantsd_unicode_environment(self):
with self.pantsd_successful_run_context(extra_env={"XXX": "¡"}) as ctx:
result = ctx.runner(["help"])
ctx.checker.assert_started()
result.assert_success()
# This is a regression test for a bug where we would incorrectly detect a cycle if two targets swapped their
# dependency relationship (#7404).
def test_dependencies_swap(self):
template = dedent(
"""
python_library(
name = 'A',
source = 'A.py',
{a_deps}
)
python_library(
name = 'B',
source = 'B.py',
{b_deps}
)
"""
)
with self.pantsd_successful_run_context() as ctx:
with temporary_dir(".") as directory:
safe_file_dump(os.path.join(directory, "A.py"), mode="w")
safe_file_dump(os.path.join(directory, "B.py"), mode="w")
if directory.startswith("./"):
directory = directory[2:]
def list_and_verify():
result = ctx.runner(["list", f"{directory}:"])
ctx.checker.assert_started()
result.assert_success()
expected_targets = {f"{directory}:{target}" for target in ("A", "B")}
self.assertEqual(expected_targets, set(result.stdout.strip().split("\n")))
with open(os.path.join(directory, "BUILD"), "w") as f:
f.write(template.format(a_deps='dependencies = [":B"],', b_deps=""))
list_and_verify()
with open(os.path.join(directory, "BUILD"), "w") as f:
f.write(template.format(a_deps="", b_deps='dependencies = [":A"],'))
list_and_verify()
def test_concurrent_overrides_pantsd(self):
"""Tests that the --concurrent flag overrides the --pantsd flag, because we don't allow
concurrent runs under pantsd."""
config = {"GLOBAL": {"concurrent": True, "pantsd": True}}
with temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
["-ldebug", "help", "goals"], workdir=workdir, config=config
)
pants_run.assert_success()
self.assertNotIn("Connecting to pantsd", pants_run.stderr)
def test_unhandled_exceptions_only_log_exceptions_once(self):
"""Tests that the unhandled exceptions triggered by LocalPantsRunner instances don't
manifest as a PantsRunFinishedWithFailureException.
That is, that we unset the global Exiter override set by LocalPantsRunner before we try to log the exception.
This is a regression test for the most glaring case of https://github.com/pantsbuild/pants/issues/7597.
"""
with self.pantsd_run_context(success=False) as ctx:
result = ctx.runner(["run", "testprojects/src/python/bad_requirements:use_badreq"])
ctx.checker.assert_running()
result.assert_failure()
# Assert that the desired exception has been triggered once.
self.assertRegex(result.stderr, r"ERROR:.*badreq==99.99.99")
# Assert that it has only been triggered once.
self.assertNotIn(
"During handling of the above exception, another exception occurred:",
result.stderr,
)
self.assertNotIn(
"pants.bin.daemon_pants_runner._PantsRunFinishedWithFailureException: Terminated with 1",
result.stderr,
)
|
uiFrontend.py |
import dbApi
import scanner.runState
import logging
import os.path
import queue
import threading
import time
import sys
import tqdm
class UiReadout(object):
def __init__(self, hashQueue, monitorQueue):
self.log = logging.getLogger("Main.UI")
self.hashQueue = hashQueue
self.processingHashQueue = monitorQueue
self.stopOnEmpty = False
self.stopped = False
def run(self):
        commits = 0
        skipped = 0
        match = 0
        clean = 0
        processed = 0
        qmax = 0
        pbar = tqdm.tqdm()
while scanner.runState.run:
try:
item = self.hashQueue.get(timeout=0.1)
pbar.update()
if item == "skipped":
                    skipped += 1
elif item == "hash_match":
match += 1
elif item == "clean":
clean += 1
elif item == "processed":
processed += 1
else:
print()
print()
print("WAT?")
print()
print(item)
print()
print()
qmax = max(qmax, self.processingHashQueue.qsize())
pbar.total = qmax + skipped + match + clean + processed
pbar.set_description("Hasher: %s remaining, %s skipped, %s match, %s clean, %s processed" % (
self.processingHashQueue.qsize(), skipped, match, clean, processed
))
            except queue.Empty:
                if self.stopOnEmpty:
                    break
self.log.info("UI Thread Exiting")
self.stopped = True
def startThread(self):
self.log.info("Starting thread")
dbTh = threading.Thread(target=self.run)
dbTh.start()
self.log.info("Thread started")
def gracefulShutdown(self):
self.stopOnEmpty = True
while not self.stopped:
time.sleep(0.5)
|
hw_thread_sbox.py |
from threading import Thread, Lock
#dictionary of async objects:
'''
at beginning/creation, a dictionary (maybe) of all potential threading objects
will be created to house the thread functions, etc...
'''
'''
https://stackoverflow.com/questions/15729498/how-to-start-and-stop-thread
'''
def runner1():
    while True:
        print("hello")

bproc = {}
# will contain, for each background process, its on/off triggers, the function
# to run, and the thread handle once started:
bproc['runner'] = {
    'trigger': {'on': 'runner', 'off': 'stoprunner'},
    'function': runner1,
    'thread': None,
}
# example Flask usage:
# all hardware threading will be handled through one high-level socket router.
from flask import Flask, render_template
app = Flask(__name__)
threads = {'runner1': None}  # thread handles, keyed by runner name
@app.route('/thread_command/<comm_val>')
def index(comm_val):
global threads #give access to threads dictionary
if threads['runner1'] is None:
threads['runner1'] = Thread(target=runner1)
threads['runner1'].daemon = True
threads['runner1'].start()
return render_template('time_series_example.html')
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_dash
from electrum_dash import WalletStorage, Wallet
from electrum_dash_gui.kivy.i18n import _
from electrum_dash.contacts import Contacts
from electrum_dash.paymentrequest import InvoiceStore
from electrum_dash.util import profiler, InvalidPassword
from electrum_dash.plugins import run_hook
from electrum_dash.util import format_satoshis, format_satoshis_plain
from electrum_dash.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch, metrics
from kivy.lang import Builder
# lazy imports for factory so that widgets can be used in kv
Factory.register('InstallWizard',
module='electrum_gui.kivy.uix.dialogs.installwizard')
Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.switch import Switch
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_dash.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'dash':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def on_quotes(self, d):
#Logger.info("on_quotes")
pass
def on_history(self, d):
#Logger.info("on_history")
if self.history_screen:
Clock.schedule_once(lambda dt: self.history_screen.update())
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self.update_status()
if self.history_screen:
self.history_screen.update()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
if self.history_screen:
self.history_screen.update()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
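    # Worked example for the two conversions above (rate and unit assumed):
    # with base_unit 'mBTC' (decimal_point() == 5) and an exchange rate of 100
    # fiat per BTC, fiat_to_btc('1') -> int(1e8 * 1 / 100) == 1000000 satoshis,
    # i.e. 10 mBTC, and btc_to_fiat('10') recovers '1'.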
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The orientation the app is currently displayed in.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
wallet = ObjectProperty(None)
'''Holds the electrum wallet
:attr:`wallet` is a `ObjectProperty` defaults to None.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
super(ElectrumWindow, self).__init__(**kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
#self.config = self.gui_object.config
self.contacts = Contacts(self.electrum_config)
self.invoices = InvoiceStore(self.electrum_config)
        # create triggers so that updates happen at most twice a second
self._trigger_update_wallet =\
Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status =\
Clock.create_trigger(self.update_status, .5)
self._trigger_notify_transactions = \
Clock.create_trigger(self.notify_transactions, 5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def on_pr(self, pr):
if pr.verify(self.contacts):
key = self.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_dash.bitcoin import base_decode, is_address
if is_address(data):
self.set_URI(data)
return
if data.startswith('dash:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_dash.transaction import Transaction
try:
text = base_decode(data, None, base=43).encode('hex')
tx = Transaction(text)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
        if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_dash.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.open()
def qr_dialog(self, title, data, show_text=False):
from uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
intent = Intent("com.google.zxing.client.android.SCAN")
intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
try:
PythonActivity.mActivity.startActivityForResult(intent, 0)
except:
self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
def scan_qr_zxing(self, on_complete):
# uses zxing embedded lib
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.renpy.android.PythonActivity')
IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
integrator = IntentIntegrator(PythonActivity.mActivity)
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
integrator.initiateScan()
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.renpy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
Logger.info("dpi: {} {}".format(metrics.dpi, metrics.dpi_rounded))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# default tab
self.switch_to('history')
# bind intent for dash: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.renpy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def load_wallet_by_name(self, wallet_path):
if not wallet_path:
return
config = self.electrum_config
try:
storage = WalletStorage(wallet_path)
except IOError:
self.show_error("Cannot read wallet file")
return
if storage.file_exists:
wallet = Wallet(storage)
action = wallet.get_action()
else:
action = 'new'
if action is not None:
# start installation wizard
Logger.debug('Electrum: Wallet not found. Launching install wizard')
wizard = Factory.InstallWizard(config, self.network, storage)
wizard.bind(on_wizard_complete=lambda instance, wallet: self.load_wallet(wallet))
wizard.run(action)
else:
self.load_wallet(wallet)
self.on_resume()
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.wallet.stop_threads()
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
self.is_exit = False
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
if self._settings_dialog is None:
from uix.dialogs.settings import SettingsDialog
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of electrum. This function performs the
        basic tasks of setting up the ui.
        '''
from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.icon = "icons/electrum.png"
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction']
self.network.register_callback(self.on_network, interests)
#self.wallet = None
self.tabs = self.root.ids['tabs']
def on_network(self, event, *args):
if event == 'updated':
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_notify_transactions(*args)
@profiler
def load_wallet(self, wallet):
self.stop_wallet()
self.wallet = wallet
self.wallet.start_threads(self.network)
self.current_account = self.wallet.storage.get('current_account', None)
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
self.notify_transactions()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
self.status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
self.status = _("Synchronizing...")
elif server_lag > 1:
self.status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_account_balance(self.current_account)
text = self.format_amount(c+x+u)
self.status = str(text.strip() + ' ' + self.base_unit)
else:
self.status = _("Not connected")
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
amount, fee = self.wallet.get_max_amount(self.electrum_config, inputs, addr, None)
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
@profiler
def notify_transactions(self, *dt):
if not self.network or not self.network.is_connected():
return
# temporarily disabled for merge
return
iface = self.network
ptfn = iface.pending_transactions_for_notifications
if len(ptfn) > 0:
            # Combine the transactions if there are more than three
tx_amount = len(ptfn)
if(tx_amount >= 3):
total_amount = 0
for tx in ptfn:
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
if(v > 0):
total_amount += v
self.notify(_("{txs}s new transactions received. Total amount"
"received in the new transactions {amount}s"
"{unit}s").format(txs=tx_amount,
amount=self.format_amount(total_amount),
unit=self.base_unit()))
iface.pending_transactions_for_notifications = []
else:
for tx in iface.pending_transactions_for_notifications:
if tx:
iface.pending_transactions_for_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
if(v > 0):
self.notify(
_("{txs} new transaction received. {amount} {unit}").
format(txs=tx_amount, amount=self.format_amount(v),
unit=self.base_unit))
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show a error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an Info Message Bubble.
        '''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an information bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
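# Illustrative call sites for the bubble helpers above. The receiver name
# `app` and the texture object `some_texture` are assumptions; the methods
# and keyword arguments are the ones defined above.
#   app.show_info(_('Request saved'), duration=2)
#   app.show_error(_('Invalid address'), arrow_pos='bottom_mid')
#   app.show_info_bubble(text='texture', icon=some_texture)  # full-screen texture branch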
def tx_dialog(self, tx):
from uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, txid):
self.show_info(txid)
if ok and pr:
pr.set_paid(tx.hash())
self.invoices.save()
self.update_tab('invoices')
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.use_encryption:
self.password_dialog(msg, f, args)
else:
apply(f, args + (None,))
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.use_encryption and password is None:
return
try:
seed = self.wallet.get_seed(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
def change_password(self, cb):
if self.wallet.use_encryption:
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.use_encryption:
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
def callback(pw):
Clock.schedule_once(lambda x: apply(f, args + (pw,)), 0.1)
if self._password_dialog is None:
from uix.dialogs.password_dialog import PasswordDialog
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
|
network_monitor.py | #!/usr/bin/env python
import optparse
import pymnl
import traceback
import time
import re
import sys
import threading
import uuid
from pymnl.nlsocket import Socket
from pymnl.message import Payload
from pymnl.attributes import AttrParser
from struct import calcsize, unpack
from utils.utils_log import utils_log
from utils.utils_cmd import execute_sys_cmd
from lib_monitor.monitor_default_format import send_nsca
# Netlink constants
RTMGRP_LINK = 1
IFF_UP = 0x1
IFF_RUNNING = 0x40
IFLA_IFNAME = 3
IFLA_MTU = 4
_re_interface_name = re.compile(r'eth\d+|bond\d+')
_logger = utils_log('network_monitor').logger
_dev_notify = dict() # Used to check whether a notification is obsolete
def parse_link_msg(payload):
"""
Parse link level specific information
:param payload:
:return:
"""
content_format = 'BBHiII'
len_format = calcsize(content_format)
content = payload[:len_format]
family, pad, type_, index, flags, change = unpack(content_format, content)
ifinfo = {'family': family,
'type': type_,
'index': index,
'flags': flags,
'change': change,
'payload': payload[len_format:], # Remaining data
}
return ifinfo
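# The 'BBHiII' format above mirrors the kernel's struct ifinfomsg:
# (ifi_family, __ifi_pad, ifi_type, ifi_index, ifi_flags, ifi_change).
# Illustrative round-trip with made-up field values:
#   >>> from struct import pack
#   >>> info = parse_link_msg(pack('BBHiII', 0, 0, 1, 2, IFF_UP | IFF_RUNNING, 0))
#   >>> bool(info['flags'] & IFF_UP)
#   True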
def notify_status(notify_id, device, up, link, speed, ip):
"""
Notify platform manager with the status of a network interface
"""
status = 'up' if up is True else 'down'
link = 'linked' if link else 'unlinked'
output = '_'.join([device, status, link, speed])
for _ in xrange(60):
if notify_id != _dev_notify[device]:
_logger.info('Notify obsolete. %s', output)
return
_logger.info('Notify message %s', output)
if send_nsca(ip, "Check_interface_state", 1, output, 'check_interface.log'):
break
_logger.error('Notify not complete, platform manager is not ready. [%s]', sys.exc_info()[0])
time.sleep(30)
else:
_logger.error('Notify device status failed')
def monitor_device(ip):
sock = Socket(pymnl.NETLINK_ROUTE)
sock.bind(pymnl.nlsocket.SOCKET_AUTOPID, RTMGRP_LINK)
_logger.info('Start listening netlink RTMGRP_LINK')
try:
while True:
msg_list = sock.recv()
for msg in msg_list:
if msg.get_errno():
_logger.error((msg.get_errstr()))
continue
link = parse_link_msg(msg.get_payload().get_binary())
ifla_payload = Payload(link['payload'])
attr_parser = AttrParser(ifla_payload)
attr_if_name = [a for a in attr_parser.get_attrs() if a.get_type() == IFLA_IFNAME]
if not attr_if_name:
_logger.info('Cannot get interface name attribute')
continue
notify_param = dict(device=attr_if_name[0].get_str_stripped())
if not _re_interface_name.match(notify_param['device']):
continue
# notify_msg['running'] = True if link['flags'] & IFF_RUNNING else False
notify_param['up'] = True if link['flags'] & IFF_UP else False
is_link, _ = execute_sys_cmd('sudo ethtool {0} | grep "Link detected: yes"'.format(notify_param['device']))
notify_param['link'] = True if is_link else False
check, result = execute_sys_cmd('sudo ethtool {0} | grep "Speed"'.format(notify_param['device']))
speed = '0'
if check:
m_speed = re.search(r'Speed:\s*(\d+)Mb/s', result[0])
if m_speed:
# Convert the reported Mb/s figure to Gb/s
speed = str(float(m_speed.group(1)) / 1000)
notify_param['speed'] = speed
_logger.info(notify_param)
# Notify platform manager
notify_id = uuid.uuid4()
_dev_notify[notify_param['device']] = notify_id
notify_param['notify_id'] = notify_id
notify_param['ip'] = ip
threading.Thread(target=notify_status, kwargs=notify_param).start()
except:
traceback.print_exc()
sock.close()
if __name__ == '__main__':
parser = optparse.OptionParser(
usage="%prog [options] [--parameter]",
description="To monitor network interface state."
)
parser.add_option("--ip",
dest="ip",
help="Node ip",
type="string",
default="192.168.136.254"
)
(options, args) = parser.parse_args()
monitor_device(options.ip)
|
passive_proxy_scraper.py | import requests, threading, time, ctypes
## CONFIG ##
title_update_delay = .1
def title():
while True:
ctypes.windll.kernel32.SetConsoleTitleW(f"cVenge's Passive Proxy Scraper | Valid: {valid} | Invalid: {invalid} | Scraped: {scraped} |")
time.sleep(title_update_delay)
def menu():
while True:
print("cVenge's Passive Proxy Scraper \n[1] Save valids to proxies.txt")
userinput = input("Enter What process to do -> ")
if userinput == "1":
global valid_proxies
print("Saving..")
with open("proxies.txt", "w") as f:
for x in valid_proxies:
f.write(x + '\n')
print("Saved!")
else:
print("Invalid option!")
continue
def main():
while True:
global scraped
global scraped_proxies
global valid_proxies
global invalid
global valid
r1 = requests.get("https://api.proxyscrape.com/v2/?request=displayproxies&protocol=http&timeout=10000&country=all&ssl=yes&anonymity=elite")
res = r1.text.split("\n")
for p in res:
scraped_proxies.append(p.strip())
scraped += len(scraped_proxies)
for prox in scraped_proxies:
try:
# An HTTP proxy is addressed with an http:// URL for both http and https traffic
requests.get("http://www.google.com/", proxies={"http": f"http://{prox}", "https": f"http://{prox}"}, timeout=5)
valid_proxies.append(prox)
valid += 1
except requests.RequestException:
invalid += 1
continue
# Re-check the proxies that passed and drop any that have stopped responding
for prox in list(valid_proxies):
try:
requests.get("http://www.google.com/", proxies={"http": f"http://{prox}", "https": f"http://{prox}"}, timeout=5)
except requests.RequestException:
valid_proxies.remove(prox)
valid -= 1
continue
scraped_proxies.clear()
if __name__ == "__main__":
valid_proxies = []
scraped_proxies = []
threads = []
invalid = 0
scraped = 0
valid = 0
for x in [title, menu, main]:
threads.append(threading.Thread(target=x))
for x in threads:
x.start()
for x in threads:
x.join()
|
preproceesing.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Process,Queue
from time import time
import seaborn as sns
import math
import json
from tqdm import tqdm
sns.set_style('darkgrid')
def analyze_dataset(path):
line = [str(i) for i in range(15)]
train_data = pd.read_csv(path+'/adult.data',sep=',',names=line,index_col=False)
test_data = pd.read_csv(path+'/adult.test',sep=',',names=line,index_col=False)
plt.figure(figsize=(40,8))
i=1
for col,name in zip([0,2,4,10,11,12],['Age','fnlwgt',"Edu-num",'capital-gain','capital-loss','hours-per-week']):
data = list(train_data[str(col)].values) + list(test_data[str(col)].values)
n_list = []
for j in tqdm(data):
if j != '?':  # skip missing values before converting to int
n_list.append(int(j))
print('cal...',len(data),len(n_list))
plt.subplot(2,3,i)
plt.title(name)
plt.hist(n_list,bins=50)
i+=1
plt.savefig(path+'/hist.png')
class EntropyBased_Discrete(object):
def __init__(self,max_splits,ent_threshold,multiprocessing=False,workers=10):
self.max_splits = max_splits
self.ent_threshold = ent_threshold
self.multiprocessing = multiprocessing
self.workers = workers
def entropy(self,pairs):
N_pairs = len(pairs)
label_cnts = {}
for pair in pairs:
label = pair[-1]
if label_cnts.get(label,-1)==-1:
label_cnts[label]=0
label_cnts[label]+=1
ent = 0.0
# one_prob = np.sum([p[-1] for p in pairs])/N_pairs
# if one_prob==0 or one_prob==1:
# return 0.0
# return -one_prob * math.log(one_prob,2) - (1-one_prob) * math.log(1-one_prob,2)
for k in label_cnts:
p = float(label_cnts[k])/N_pairs
ent -= p*math.log2(p)
return ent
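# Illustrative check of entropy() (values made up): four pairs split evenly
# between two labels give the maximum entropy of 1 bit.
#   >>> EntropyBased_Discrete(2, 0.5).entropy([[1, 0], [2, 0], [3, 1], [4, 1]])
#   1.0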
def multi_split_onetime(self,pairs):
self.final_res = Queue()
worker_dict = {}
pair_range = np.arange(len(pairs))
true_workers = min(len(pairs),self.workers)
range_length = len(pairs)//true_workers
for w in range(true_workers):
if w==true_workers-1:
worker_dict[w] = pair_range[w*range_length:]
else:
worker_dict[w] = pair_range[w*range_length:(w+1)*range_length]
Process_list = []
st = time()
pairs = pairs[np.argsort(pairs[:,0])]
for w in range(true_workers):
Process_list.append(Process(target=self.single_worker,args=(pairs,worker_dict[w])))
for process in Process_list:
process.start()
for process in Process_list:
process.join()
min_ent = np.inf
min_pair = tuple()
#print(time()-st)
res = [self.final_res.get() for process in Process_list]
#print(res)
for pair in res:
if pair[-1]<min_ent:
min_pair = pair
min_ent = pair[-1]
pre_Ent,post_Ent,split_id,min_ent = min_pair
pre_dict = {
'ent':pre_Ent,
'pairs':pairs[:split_id+1]
}
post_dict = {
'ent':post_Ent,
'pairs':pairs[split_id+1:]
}
return pre_dict,post_dict,min_ent
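# How multi_split_onetime partitions candidate split indices (illustrative,
# with made-up sizes): 10 pairs and 3 workers give range_length = 3, so
#   worker 0 -> indices [0, 1, 2]
#   worker 1 -> indices [3, 4, 5]
#   worker 2 -> indices [6, 7, 8, 9]   # the last worker absorbs the remainder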
def single_worker(self,pairs,length_list):
min_ent = np.inf
split_id = -1
length = len(pairs)
pre_Ent,post_Ent = -1,-1
for i in tqdm(length_list):
pre_data,post_data = pairs[:i+1],pairs[i+1:]
pre_ent,post_ent = self.entropy(pre_data),self.entropy(post_data)
avg_ent = (pre_ent*len(pre_data) + post_ent*len(post_data))/length
if avg_ent < min_ent:
min_ent = avg_ent
split_id = i
pre_Ent = pre_ent
post_Ent = post_ent
self.final_res.put((pre_Ent,post_Ent,split_id,min_ent))
def split_onetime(self,pairs):
"""
Find the best split of the records
according to the weighted average information entropy.
"""
min_ent = np.inf
split_id = -1
length = len(pairs)
pre_Ent,post_Ent = -1,-1
sort_pairs = pairs[np.argsort(pairs[:,0])]
for i in tqdm(range(length)):
pre_data,post_data = sort_pairs[:i+1],sort_pairs[i+1:]
pre_ent,post_ent = self.entropy(pre_data),self.entropy(post_data)
avg_ent = pre_ent*len(pre_data)/len(sort_pairs) + post_ent*len(post_data)/len(sort_pairs)
if avg_ent < min_ent:
min_ent = avg_ent
split_id = i
pre_Ent = pre_ent
post_Ent = post_ent
pre_dict = {
'ent':pre_Ent,
'pairs':sort_pairs[:split_id+1]
}
post_dict = {
'ent':post_Ent,
'pairs':sort_pairs[split_id+1:]
}
return pre_dict,post_dict,min_ent
def split(self,data):
"""
Perform the entropy-based discretization
"""
#sorted_data = data[np.argsort(data[:,0])]
print('sorted! start splitting...')
self.final_split = {
0:{}
}
self.final_split[0]['ent'] = np.inf
self.final_split[0]['pairs'] = data
split_ind =[0]
classes = 1
for i in split_ind:
if self.multiprocessing:
pre_split,post_split,entropy = self.multi_split_onetime(self.final_split[i]['pairs'])
else:
pre_split,post_split,entropy = self.split_onetime(self.final_split[i]['pairs'])
if entropy > self.ent_threshold and classes < self.max_splits:
self.final_split[i] = pre_split
next_key = max(self.final_split.keys())+1
self.final_split[next_key] = post_split
split_ind.extend([i])
split_ind.extend([next_key])
classes += 1
else:
break
def test_spliter():
data = np.array(
[
[56,1],[87,1],[129,0],[23,0],[342,1],
[641,1],[63,0],[2764,1],[2323,0],[453,1],
[10,1],[9,0],[88,1],[222,0],[97,0],
]
)
spliter = EntropyBased_Discrete(6,0.5)
spliter.split(data)
print(spliter.final_split)
def test_single_continuous_dis():
path = 'C:/Users/24829/Desktop/data_minign/assignment-1/adult_dataset'
line = [str(i) for i in range(15)]
attr = 0
train_data = pd.read_csv(path+'/adult.data',sep=',',names=line,index_col=False)
test_data = pd.read_csv(path+'/adult.test',sep=',',names=line,index_col=False)
age_data = []
for i,ind in zip(train_data[str(attr)].values,train_data[str(14)].values):
if ind==' <=50K':
age_data.append([int(i),0])
else:
age_data.append([int(i),1])
for i,ind in zip(test_data[str(attr)].values,test_data[str(14)].values):
if ind==' <=50K.':
age_data.append([int(i),0])
else:
age_data.append([int(i),1])
spliter = EntropyBased_Discrete(12,0.5,multiprocessing=True,workers=20)
spliter.split(np.array(age_data))
print('complete')
split_set = []
i=1
for val in tqdm(spliter.final_split.values()):
single_set = set([ind[0] for ind in val['pairs']])
split_set.append(single_set)
print(i,len(single_set),single_set)
i+=1
new_col_train = []
new_col_test = []
for ind in train_data[str(attr)]:
for j,discre_set in enumerate(split_set):
if int(ind) in discre_set:
new_col_train.append(j)
break
for ind in test_data[str(attr)]:
for j,discre_set in enumerate(split_set):
if int(ind) in discre_set:
new_col_test.append(j)
break
# with open(path+'/age.json','w',encoding='utf-8') as writer:
# writer.write(json.dumps([new_col_train,new_col_test],ensure_ascii=False,indent=4))
def get_dict():
attr_dict = {
"workclass" : [' Private',' Self-emp-not-inc',' Self-emp-inc',' Federal-gov',' Local-gov',' State-gov'," Without-pay"," Never-worked"],
"education" : [' Bachelors',' Some-college',' 11th',' HS-grad',' Prof-school',' Assoc-acdm',' Assoc-voc',' 9th',' 7th-8th',' 12th',' Masters',' 1st-4th',' 10th',' Doctorate',' 5th-6th',' Preschool'],
"marital-status": [' Married-civ-spouse',' Divorced',' Never-married',' Separated',' Widowed',' Married-spouse-absent',' Married-AF-spouse'],
"occupation" :' Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces'.split(','),
"relationship" :' Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried'.split(','),
"race":' White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black'.split(','),
"sex":[' Female',' Male'],
"native-country":' United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands'.split(','),
"label":[' <=50K',' >50K']
}
V = []
for k,v in attr_dict.items():
print(k,len(v))
V.append(len(v))
plt.figure(figsize=(10,5))
plt.bar(np.arange(len(V)),V)
plt.xticks(np.arange(len(V)),list(attr_dict.keys()))
for x,y in zip(np.arange(len(V)),V):
plt.text(x,y+1,str(y))
plt.show()
attr2id_dict={}
for k,v in attr_dict.items():
attr2id_dict[k] = dict((j,i) for i,j in enumerate(v))
attr2id_dict[k][' ?']=-1
#print(attr2id_dict)
return attr2id_dict
def preprocess(path):
line = ['age',"workclass",'fnlwgt','education','education-num',
'marital-status','occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week',
'native-country','label']
# key_to_lines = dict((j,i) for i,j in enumerate(line))
train_data = pd.read_csv(path+'/adult.data',sep=',',names=line,index_col=False)
test_data = pd.read_csv(path+'/adult.test',sep=',',names=line,index_col=False)
print(train_data.head(10))
attr2id_dict = get_dict()
key_list = set(list(attr2id_dict.keys()))
new_train_data = dict()
new_test_data = dict()
# Tokenize the discrete attributes and the label; missing values get -1
for l in tqdm(line):
new_train_data[l]=[]
if l in key_list:
for data in train_data[l].values:
new_train_data[l].append(attr2id_dict[l][data])
else:
new_train_data[l]=train_data[l].values
new_test_data[l]=[]
if l in key_list:
for data in test_data[l].values:
if l=='label':
data = data[:-1]
new_test_data[l].append(attr2id_dict[l][data])
else:
new_test_data[l]=test_data[l].values
new_train_df = pd.DataFrame(data=new_train_data)
new_test_df = pd.DataFrame(data=new_test_data)
new_train_df.to_csv(path+'/train.csv',index=False)
new_test_df.to_csv(path+'/test.csv',index=False)
#process train:
if __name__=="__main__":
# analyze_dataset('C:/Users/24829/Desktop/data_minign/assignment-1/adult_dataset')
#test_spliter()
#test_single_continuous_dis()
preprocess('C:/Users/24829/Desktop/data_minign/assignment-1/adult_dataset')
#a = get_dict() |
cnn_util.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for CNN benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow as tf
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
def log_fn(log):
print(log)
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
This function moves a proportion of batches, specified by `shift_ratio`, from
the start of the array to the end. The number of batches moved is rounded
down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
# Implementation adopted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
# generation is needed to deal with spurious wakeups. If self.cond.wait()
# wakes up for other reasons, generation will force it to go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
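# Minimal usage sketch for Barrier (illustrative; `do_work` is a hypothetical
# callable): two threads rendezvous, then proceed together.
#   barrier = Barrier(2)
#   def worker():
#       do_work()
#       barrier.wait()   # blocks until both threads have called wait()
#   threads = [threading.Thread(target=worker) for _ in range(2)]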
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
supplied to the input pipeline at the same amount they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
while not self.done_event.isSet():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
self._cluster_spec = tf.train.ClusterSpec(cluster)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
|
qiushi_thread.py | #coding:utf-8
import requests
from lxml import etree
import json
from queue import Queue
import threading
class Qiushi(object):
"""
Uses queues to pass data between stages instead of return values.
On startup, three groups of threads each run a `while True` loop, putting urls, resp_data, and data onto queues:
the first thread keeps putting urls on the url queue, the second keeps taking urls off that queue and producing resp_data,
and the third keeps taking resp_data off the second queue and saving it.
In short, a producer-consumer pattern.
"""
def __init__(self):
self.url = 'https://www.qiushibaike.com/hot/page/{}/'
self.url_list = None
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}
self.file = open('qiushi.json','w', encoding='utf8')
# Create the three queue objects
self.url_queue = Queue()
self.response_queue = Queue()
self.data_queue = Queue()
def generate_url_list(self):
for i in range(1, 14):
self.url_queue.put(self.url.format(i))
def get_data(self):
while True:
# Get a url from the queue
url = self.url_queue.get()
print('Fetching the response for {}'.format(url))
response = requests.get(url, headers=self.headers)
if response.status_code == 503:
self.url_queue.put(url)
else:
# Put the response content on the response queue
self.response_queue.put(response.content)
# Each task_done() call removes one pending item from the queue's counter
# url_queue.join() blocks until that counter drops to 0; only then does the main thread move on
self.url_queue.task_done()
def parse_data(self):
while True:
data = self.response_queue.get()
print('Parsing data')
# Build the element tree object
html = etree.HTML(data)
# Locate the list of post nodes
node_list = html.xpath('//*[contains(@id,"qiushi_tag_")]')
# print(len(node_list))
# Build a list to hold the extracted data
data_list = []
# Iterate over the node list and extract data from each node
for node in node_list:
temp = dict()
try:
temp['user'] = node.xpath('./div[1]/a[2]/h2/text()')[0].strip()
temp['link'] = 'https://www.qiushibaike.com' + node.xpath('./div[1]/a[2]/@href')[0]
temp['age'] = node.xpath('./div[1]/div/text()')[0]
temp['gender'] = node.xpath('./div[1]/div/@class')[0].split(' ')[-1].replace('Icon', '')
except:
temp['user'] = 'Anonymous user'
temp['link'] = None
temp['age'] = None
temp['gender'] = None
# Append the record to the data list
data_list.append(temp)
self.data_queue.put(data_list)
self.response_queue.task_done()
def save_data(self):
while True:
data_list = self.data_queue.get()
print('Saving data')
for data in data_list:
print(data)
str_data = json.dumps(data, ensure_ascii=False) + ',\n'
self.file.write(str_data)
self.data_queue.task_done()
def __del__(self):
self.file.close()
def run(self):
# List that stores the threads
thread_list = []
# Thread that generates the url list
t_generate_url = threading.Thread(target=self.generate_url_list)
thread_list.append(t_generate_url)
# Threads that fetch the data
for i in range(3):
t = threading.Thread(target=self.get_data)
thread_list.append(t)
# Threads that parse the data
for i in range(3):
t = threading.Thread(target=self.parse_data)
thread_list.append(t)
t_save_data = threading.Thread(target=self.save_data)
thread_list.append(t_save_data)
# Start the threads
for t in thread_list:
# Daemon threads: they exit when the main thread exits
t.setDaemon(True)
t.start()
# Block until every queue has been fully processed
for q in [self.url_queue, self.response_queue, self.data_queue]:
q.join()
if __name__ == '__main__':
qiushi = Qiushi()
qiushi.run() |
log.py | # coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import functools
import logging
import os
import sys
import time
import threading
from typing import List
import colorlog
from colorama import Fore
loggers = {}
log_config = {
'DEBUG': {
'level': 10,
'color': 'purple'
},
'INFO': {
'level': 20,
'color': 'green'
},
'TRAIN': {
'level': 21,
'color': 'cyan'
},
'EVAL': {
'level': 22,
'color': 'blue'
},
'WARNING': {
'level': 30,
'color': 'yellow'
},
'ERROR': {
'level': 40,
'color': 'red'
},
'CRITICAL': {
'level': 50,
'color': 'bold_red'
}
}
class Logger(object):
'''
Default logger in PaddleNLP
Args:
name(str) : Logger name, default is 'PaddleNLP'
'''
def __init__(self, name: str = None):
name = 'PaddleNLP' if not name else name
self.logger = logging.getLogger(name)
for key, conf in log_config.items():
logging.addLevelName(conf['level'], key)
self.__dict__[key] = functools.partial(self.__call__, conf['level'])
self.__dict__[key.lower()] = functools.partial(
self.__call__, conf['level'])
self.format = colorlog.ColoredFormatter(
'%(log_color)s[%(asctime)-15s] [%(levelname)8s]%(reset)s - %(message)s',
log_colors={key: conf['color']
for key, conf in log_config.items()})
self.handler = logging.StreamHandler()
self.handler.setFormatter(self.format)
self.logger.addHandler(self.handler)
self.logLevel = 'DEBUG'
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
self._is_enable = True
def disable(self):
self._is_enable = False
def enable(self):
self._is_enable = True
@property
def is_enable(self) -> bool:
return self._is_enable
def __call__(self, log_level: str, msg: str):
if not self.is_enable:
return
self.logger.log(log_level, msg)
@contextlib.contextmanager
def use_terminator(self, terminator: str):
old_terminator = self.handler.terminator
self.handler.terminator = terminator
yield
self.handler.terminator = old_terminator
@contextlib.contextmanager
def processing(self, msg: str, interval: float = 0.1):
'''
Continuously print a progress bar with rotating special effects.
Args:
msg(str): Message to be printed.
interval(float): Rotation interval. Default to 0.1.
'''
end = False
def _printer():
index = 0
flags = ['\\', '|', '/', '-']
while not end:
flag = flags[index % len(flags)]
with self.use_terminator('\r'):
self.info('{}: {}'.format(msg, flag))
time.sleep(interval)
index += 1
t = threading.Thread(target=_printer)
t.start()
yield
end = True
logger = Logger()
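# Illustrative usage (the `train` level and the `processing` spinner are
# defined above; `load_model` is a hypothetical callable):
#   logger.train('epoch 1 finished')
#   with logger.processing('Loading model'):
#       load_model()   # the spinner animates on one line until the block exits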
|
compare_WalltoallCNOT_adam.py |
import multiprocessing
import importlib
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.encoding
import qtm.fubini_study
import qtm.ansatz
import qtm.constant
import qtm.base
import qtm.optimizer
import qtm.loss
def run_walltoall(num_layers, num_qubits):
thetas = np.ones(num_layers * 3 * num_qubits)
psi = 2*np.random.rand(2**num_qubits)-1
psi = psi / np.linalg.norm(psi)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi, range(0, num_qubits))
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_alltoall: (' + str(num_layers) +
',' + str(num_qubits) + '): ' + str(i))
grad_loss = qtm.base.grad_loss(
qc,
qtm.ansatz.create_WalternatingCNOT_layerd_state,
thetas, r=1/2, s=np.pi/2, num_layers=num_layers)
if i == 0:
m, v = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.adam(thetas, m, v, i, grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.ansatz.create_WalternatingCNOT_layerd_state(
qc.copy(), thetas, num_layers)
loss = qtm.loss.loss_basis(qtm.base.measure(
qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
traces = []
fidelities = []
for thetas in thetass:
# Get |psi~> = U_target|000...>
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.ansatz.create_WalternatingCNOT_layerd_state(
qc, thetas, num_layers=num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces.append(trace)
fidelities.append(fidelity)
print('Writing ... ' + str(num_layers) +
' layers,' + str(num_qubits) + ' qubits')
np.savetxt("../../experiments/tomographyCNOT/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/loss_values_adam.csv", loss_values, delimiter=",")
np.savetxt("../../experiments/tomographyCNOT/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/thetass_adam.csv", thetass, delimiter=",")
np.savetxt("../../experiments/tomographyCNOT/tomography_walltoall_" +
str(num_layers) + "/" + str(num_qubits) + "/traces_adam.csv", traces, delimiter=",")
np.savetxt("../../experiments/tomographyCNOT/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/fidelities_adam.csv", fidelities, delimiter=",")
if __name__ == "__main__":
# create one worker process per (num_layers, num_qubits) pair
num_layers = [1, 2, 3, 4, 5]
num_qubits = [3, 4, 5]
t_walltoalls = []
for i in num_layers:
for j in num_qubits:
t_walltoalls.append(multiprocessing.Process(
target=run_walltoall, args=(i, j)))
for t_walltoall in t_walltoalls:
t_walltoall.start()
for t_walltoall in t_walltoalls:
t_walltoall.join()
print("Done!")
|
combine.py | #!/usr/bin/python
import threading
import select
import sys
import time
import operator
import queue
input_queue = queue.Queue()
results = {}
def print_result_as_str(result):
result_str = ""
for i in range(0, len(result), 2):
value = int(result[i:i+2].strip(), 16)
if value <= 32 or value >= 176:
result_str += "<%d>" % value
else:
result_str += "%c" % value
print(result_str)
def match(a, b):
if a[:4] == b[-4:]:
return 1
elif a[-4:] == b[:4]:
return -1
return 0
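# match() compares 2-byte (4 hex char) overlaps between fragments; with the
# 3-byte keys produced by parse_line() (illustrative values):
#   match('aabbcc', 'ffaabb') ==  1   # the other fragment precedes this one
#   match('aabbcc', 'bbccdd') == -1   # the other fragment extends this one
#   match('aabbcc', 'ddeeff') ==  0   # no overlap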
def test_prediction_idx(sorted_results, idx):
# pick the starting fragment
first = sorted_results[idx]
sorted_results.remove(first)
result = first[0]
while len(sorted_results) > 0:
matched = False
# find a fragment that overlaps the current result
for entry in sorted_results:
r = match(result, entry[0])
if r == -1:
# entry extends the result: append its trailing byte
result = result + entry[0][-2:]
sorted_results.remove(entry)
matched = True
break
elif r == 1:
# entry precedes the result: prepend its leading byte
result = entry[0][0:2] + result
sorted_results.remove(entry)
matched = True
break
if not matched:
break
print_result_as_str(result)
def update_prediction():
# start with first input
sorted_results = sorted(results.items(), key=operator.itemgetter(1), reverse=True)
for i,_ in enumerate(sorted_results):
if sorted_results[i][1] > 10:
print("Starting with %s" % sorted_results[i][0])
r = sorted_results
test_prediction_idx(r, i)
def parse_line(line):
try:
# convert to bytes
if False:
line_hex = [int(x, 16) for x in line.split(":")[0].split(" ")]
f = bytearray()
f.append(line_hex[0])
f.append(line_hex[1])
f.append(line_hex[2])
f = bytes(f)
key = f
else:
key = "".join(line.split(":")[0].split(" "))
if key in results:
results[key] += 1
else:
results[key] = 1
return True
except:
return False
def idle():
time.sleep(0.001)
def read_input():
# Keep polling stdin so the thread survives until input actually arrives
while True:
if select.select([sys.stdin,],[],[],0.0)[0]:
for line in sys.stdin:
line = line.strip()
input_queue.put(line)
return
else:
idle()
def main():
# Start input thread
input_thread = threading.Thread(target=read_input)
input_thread.start()
try:
while True:
try:
line = input_queue.get(timeout=1)
if not parse_line(line):
continue
except queue.Empty:
update_prediction()
idle()
except KeyboardInterrupt:
return
if __name__ == "__main__":
main()
|
adjustScreen.py | import win32gui
import win32con
import threading
import time
# input boxes for x, y, width, height set up
# by default: screenwidth, screenheight used for width height
# dropdown for selecting app
signal_foreground = threading.Event()
def waitOnSignal(hwnd, x,y,width,height):
# TODO: fix wait till WM protocol arrives
# While it is the active (foreground) window; a crude busy wait
while (win32gui.GetForegroundWindow() == hwnd):
print('Sleeping...')
time.sleep(1)
print('Woke up!')
print('off state')
# -2 == win32con.HWND_NOTOPMOST: drop the window out of the always-on-top band
win32gui.SetWindowPos(hwnd, -2, x, y, width, height, 1)
return
def adjust(title, x,y,width,height, st):
# up only active
hwnd = win32gui.FindWindow(None, title)
print(title)
print(hwnd)
if (hwnd != 0):
print('on state')
win32gui.MoveWindow(hwnd, x,y,width,height,1)
# -1 == win32con.HWND_TOPMOST: keep the window above all non-topmost windows
win32gui.SetWindowPos(hwnd, -1, x,y,width,height, 1)
# Thread join wait
# TODO: fix
if (st == 1):
print('got here')
t1 = threading.Thread(target=waitOnSignal, args=(hwnd, x,y,width,height,))
t1.start()
#print('signal arrived')
return
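# Illustrative call (the window title is hypothetical): pin a Notepad window
# to the top-left quarter of a 1920x1080 screen and watch for focus loss:
#   adjust('Untitled - Notepad', 0, 0, 960, 540, st=1)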
def off(title, x,y,width,height):
print('off state')
hwnd = win32gui.FindWindow(None, title)
win32gui.SetWindowPos(hwnd, -2,x,y,width,height,1)
return
|
02_sectclear_detect.py | import time
import os
from pynput.keyboard import Key, Listener, KeyCode
from pynput import mouse, keyboard
from windowcapture import WindowCapture
import cv2
from threading import Thread
os.chdir(os.path.dirname(os.path.abspath(__file__)))
class ScreenshotGrabber5Sec():
def __init__(self) -> None:
self.listener = None
def start(self):
self.start_keypress_listener()
while True:
time.sleep(0.5)
def start_keypress_listener(self):
if self.listener is None:
self.listener = Listener(on_press=self.on_press,
on_release=self.on_release)
self.listener.start()
def on_press(self, key):
if key == keyboard.Key.f11:
os._exit(1)
def on_release(self, key):
if key == keyboard.Key.f10:
print("Starting sectclear check")
t = Thread(target=self.sectclear_checker)
t.start()
def sectclear_checker(self):
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[
464+156, 640, 464+261, 641])
while True:
image = wincap.get_screenshot()
# Sample the leftmost and rightmost pixels of the captured 1-px-high strip
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
# Both pixels near-white (channel sum > 700 of a possible 765) => clear banner shown
if a+b+c > 700 and d+e+f > 700:
print("Detected sect clear")
break
else:
time.sleep(0.1)
if __name__ == "__main__":
ssg = ScreenshotGrabber5Sec()
ssg.start()
|
Arena.py | from pytorch_classification.utils import Bar, AverageMeter
import multiprocessing as mp
import numpy as np
import time, copy
from utils import *
class Arena():
"""
An Arena class where any 2 agents can be pit against each other.
"""
def __init__(self, player1, player2, game, display=None, num_workers=1, result_queue=None):
"""
Input:
player1, player2: two functions that take a board as input and return an action
game: Game object
display: a function that takes board as input and prints it (e.g.
display in othello/OthelloGame). Is necessary for verbose
mode.
see othello/OthelloPlayers.py for an example. See pit.py for pitting
human players/other baselines with each other.
"""
self.player1 = player1
self.player2 = player2
self.game = game
self.display = display
self.num_workers = num_workers
self.result_queue = result_queue
# DEPRECATED -- use arena_worker instead
def playGame(self, verbose=False):
"""
Executes one episode of a game.
Returns:
either
winner: player who won the game (1 if player1, -1 if player2)
or
draw result returned from the game that is neither 1, -1, nor 0.
"""
players = [self.player2, None, self.player1]
curPlayer = 1
board = self.game.getInitBoard()
it = 0
while self.game.getGameEnded(board, curPlayer)==0:
it+=1
if verbose:
assert(self.display)
print("Turn ", str(it), "Player ", str(curPlayer))
self.display(board)
action = players[curPlayer+1](self.game.getCanonicalForm(board, curPlayer))
valids = self.game.getValidMoves(self.game.getCanonicalForm(board, curPlayer),1)
if valids[action]==0:
print("**********************")
print(action)
print(np.where(valids>0))
assert valids[action] >0
board, curPlayer = self.game.getNextState(board, curPlayer, action)
if self.result_queue is not None:
res = self.game.getGameEnded(board, 1)
self.result_queue.put(None if res == 0 else res)
res = self.game.getGameEnded(board, 1)
if verbose:
assert(self.display)
print("Game over: Turn ", str(it), "Result ", str(res))
if self.result_queue is not None: self.result_queue.put(res)
self.display(board)
return res
# PARALLELISM NOT IMPLEMENTED FOR ARENA
# def arena_worker(self, work_queue, done_queue, i, player1, player2):
# print("[Worker " + str(i) + "] Started!")
# while True:
# data = work_queue.get()
# player = data["player"]
# game = data["game"]
# verbose = data["verbose"]
# eps = data["i"]
# start = time.time()
# players = [player2, None, player1] if player == 1 else [player1, None, player2]
# board = game.getInitBoard()
# curPlayer = 1
# it = 0
# while game.getGameEnded(board, curPlayer) == 0:
# it += 1
# if verbose:
# print("Turn ", str(it), "Player ", str(curPlayer))
# self.display(board)
# action = players[curPlayer + 1](game.getCanonicalForm(board, curPlayer))
# valids = game.getValidMoves(game.getCanonicalForm(board, curPlayer), 1) # TODO: Is this check necessary?
# if valids[action] == 0:
# print("<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ERROR >>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# print(action)
# print(np.where(valids > 0))
# assert valids[action] > 0
# # The action is valid
# board, curPlayer = game.getNextState(board, curPlayer, action)
# res = game.getGameEnded(board, 1)
# if verbose:
# print("Game over: Turn ", str(it), "Result ", str(res))
# self.display(board)
# # Return the result of the game from the HUMAN (player 1) perspective
# # NOTE: This is not the same thing as game.getGameEnded(board, curPlayer)
# done_queue.put((time.time() - start, res))
def playGames(self, num, verbose=False):
"""
Plays num games in which player1 starts num/2 games and player2 starts
num/2 games.
Returns:
oneWon: games won by player1
twoWon: games won by player2
draws: games won by nobody
"""
tracker = ParallelRuntimes(self.num_workers)
bar = Bar('Arena.playGames', max=num)
oneWon = 0
twoWon = 0
draws = 0
# # Multiprocess pitting
# proccesses = []
# work_queue = mp.Queue()
# done_queue = mp.Queue()
# print("[Master] Spawning Workers...")
# # Spawn workers
# for i in range(self.num_workers):
# tup = (work_queue, done_queue, i, self.player1, self.player2)
# proc = mp.Process(target=self.arena_worker, args=tup)
# proc.start()
# proccesses.append(proc)
# print("[Master] Adding work...")
# # Add work to queue
# first_half = int(num / 2)
# for i in range(first_half):
# data = dict()
# data["i"] = i
# data["player"] = 1
# data["game"] = copy.deepcopy(self.game)
# data["verbose"] = verbose
# work_queue.put(data)
# second_half = num - first_half
# for i in range(second_half):
# data = dict()
# data["i"] = i
# data["player"] = -1 # Switch players
# data["game"] = copy.deepcopy(self.game)
# data["verbose"] = verbose
# work_queue.put(data)
# print("[Master] Waiting for results...")
# Wait for results to come in
first_half = int(num / 2)
for i in range(first_half):
start = time.time()
gameResult = self.playGame(verbose=verbose)
if gameResult == 1:
oneWon += 1
elif gameResult == -1:
twoWon += 1
else:
draws += 1
# bookkeeping + plot progress
tracker.update(time.time() - start)
bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(
eps=i + 1, maxeps=num, et=tracker.avg(), total=bar.elapsed_td,
eta=tracker.eta(i + 1, num))
bar.next()
self.player1, self.player2 = self.player2, self.player1
second_half = num - first_half
for i in range(second_half):
start = time.time()
gameResult = self.playGame(verbose=verbose)
if gameResult == -1:
oneWon += 1
elif gameResult == 1:
twoWon += 1
else:
draws += 1
# bookkeeping + plot progress
tracker.update(time.time() - start)
bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(
eps=i + first_half + 1, maxeps=num, et=tracker.avg(), total=bar.elapsed_td,
eta=tracker.eta(i + 1, num))
bar.next()
# print("[Master] Killing workers...")
# # Kill workers
# for p in proccesses:
# p.terminate()
# p.join()
bar.finish()
print("Player 1 Won: " + str(oneWon) + ", Player 2 Won: "+str(twoWon)+", Draws: "+str(draws))
return oneWon, twoWon, draws
|
ossh_server.py | from threading import Thread
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
from jnpr.junos.exception import ConnectError
from jnpr.junos.utils.sw import SW
from jnpr.junos.exception import LockError
from jnpr.junos.exception import UnlockError
from jnpr.junos.exception import ConfigLoadError
from jnpr.junos.exception import CommitError
from junospyez_ossh_server.log import logger
import redis
import requests
from pprint import pprint
import os, sys
import tempfile
from datetime import datetime
import time
import socket
import json
import warnings
from lxml import etree
import xmltodict
import socketio
warnings.filterwarnings(action='ignore', module='.*paramiko.*')
__all__ = ['OutboundSSHServer']
def get_time():
current_time = str(datetime.now().time())
no_sec = current_time.split('.')
poll = no_sec.pop(0)
return poll
def new_log_event(**kwargs):
device_sn = kwargs.get('device_sn', '__UNKNOWN__')
event = kwargs.get('event', '')
configpy_url = kwargs.get('configpy_url', 'http://10.0.0.204:80')
sio = kwargs.get('sio', None)
log = f'[{get_time()}][{device_sn}]: {event}'
logger.info(log)
try:
if sio:
data = {'event_time': get_time(), 'event': f'[{device_sn}]: {event}'}
sio.emit('hub_console', data)
except Exception as e:
logger.info(f'SocketIO Exception: {str(e)}')
def convert(data):
json_data = json.dumps(data)
result = json.loads(json_data)
return result
def repo_sync(redis, facts, **kwargs):
sio = kwargs['sio']
words = kwargs["repo_uri"].split("/")
protocol = words[0]
domain = words[2]
gitlab_url = '{0}//{1}'.format(protocol, domain)
findall = '{0}/api/v4/projects/'.format(gitlab_url)
headers = {
'PRIVATE-TOKEN': "{0}".format(kwargs['repo_auth_token']),
'Content-Type': "application/json",
'User-Agent': "ConfigPy-Node",
'Accept': "*/*",
'Cache-Control': "no-cache",
'Connection': "keep-alive",
'cache-control': "no-cache"
}
querystring = {"per_page": "100"}
try:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Reaching out to gather repo information...')
r = requests.get(findall, headers=headers, params=querystring, timeout=5)
if r.status_code == 200:
returned = r.json()
else:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to access repo.')
raise Exception(f'{r.text}')
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
redis.hmset(kwargs["device_sn"], {'config': 'repo error'})
redis.hmset(kwargs["device_sn"], {'repo_error': f'{str(e)}'})
return
for x in returned:
if x['path_with_namespace'] in kwargs["repo_uri"]:
raw_config_file = f'{findall}/{x["id"]}/repository/files/{kwargs["cid"]}%2F{kwargs["device_sn"]}%2Eset/raw?ref=master'
try:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Grabbing config file from repo...')
returned = requests.get(raw_config_file, headers=headers, timeout=5)
if returned.status_code == 200:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Device config acquired.')
url_list = ['edit', 'blob']
for item in url_list:
url = f'{kwargs["repo_uri"]}/{item}/master/{kwargs["cid"]}/{kwargs["device_sn"]}.set'
redis.hmset(kwargs["device_sn"], {f'device_repo_{item}': f'{url}'})
print(url)
return returned
else:
raise Exception(f'{returned.text}')
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
redis.hmset(kwargs["device_sn"], {'config': 'repo error'})
redis.hmset(kwargs["device_sn"], {'repo_error': f'{str(e)}'})
return
def cluster_srx(**kwargs):
if all(kwargs.get(k) for k in ('dev', 'sio', 'facts', 'ztp_cluster_node')):
dev = kwargs['dev']
sio = kwargs['sio']
facts = kwargs['facts']
ztp_cluster_node = kwargs['ztp_cluster_node']
try:
dev.rpc.set_chassis_cluster_enable(cluster_id='100', node=ztp_cluster_node, reboot=True)
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Device assigned node id: {ztp_cluster_node}')
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Error: {str(e)}')
def update_config(device, facts, r, repo_uri, repo_auth_token, sio=None):
# Attempt to find the config file for the connected device
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Searching {repo_uri} in directory {facts["cid"]} for {facts["device_sn"]}.set')
conf_file = repo_sync(r, facts, repo_uri=repo_uri, cid=facts['cid'], device_sn=facts['device_sn'], repo_auth_token=repo_auth_token, sio=sio)
# If found, save the file so it can be loaded by PyEz.
config_file_location = f'configs/{facts["device_sn"]}.set'
try:
if conf_file:
print('Writing to file')
with open(config_file_location, "w") as file:
file.write(conf_file.text)
except AttributeError:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to access remote repo')
r.hmset(facts['device_sn'], {'config': 'repo error'})
return
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to access remote repo')
r.hmset(facts['device_sn'], {'config': 'repo error'})
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'default exception.')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
return
# Check to see if config file exists.
# It should, but we'll check anyway.
if os.path.exists(config_file_location):
device.bind(cu=Config)
device.timeout = 300
# Lock the configuration, load configuration changes, and commit
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Locking the configuration')
try:
device.cu.lock()
except LockError as err:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to lock configuration: {err}')
device.close()
return
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Loading configuration changes')
try:
device.cu.load(path=config_file_location, merge=True, ignore_warning='statement not found')
except (ConfigLoadError, Exception) as err:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to load configuration changes: {err}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unlocking the configuration')
try:
device.cu.unlock()
except UnlockError as err:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to unlock configuration: {err}')
try:
os.remove(config_file_location)
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to delete {config_file_location}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
device.close()
return
show_compare = device.cu.diff(rb_id=0)
if show_compare is None:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'*** No changes needed...')
try:
r.hmset(facts['device_sn'], {'config': 'compliant'})
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
else:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Changes found!')
try:
r.hmset(facts['device_sn'], {'config': 'non-compliant'})
r.hmset(facts['device_sn'], {'last change': show_compare})
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{show_compare}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Updating config...')
try:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Running commit check...')
try:
r.hmset(facts['device_sn'], {'config': 'running commit check'})
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
if device.cu.commit_check() is True:
try:
r.hmset(facts['device_sn'], {'config': 'commit check passed'})
r.hmset(facts['device_sn'], {'config': 'running commit confirmed'})
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Commit check passed.')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'running commit confirmed')
try:
commit = device.cu.commit(comment='Loaded by DSC.', confirm=2, timeout=240)
if commit:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Commit complete.')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Confirming changes...')
try:
r.hmset(facts['device_sn'], {'config': 'confirm commit'})
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
if device.cu.commit_check():
try:
r.hmset(facts['device_sn'], {'config': 'compliant'})
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Commit confirmed.')
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
r.hmset(facts['device_sn'], {'config': 'device lost'})
return
else:
r.hmset(facts['device_sn'], {'config': 'commit check failed'})
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Commit check failed...')
device.cu.unlock()
try:
os.remove(config_file_location)
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to delete {config_file_location}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{e}')
except CommitError as err:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to commit configuration: {err}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unlocking the configuration')
try:
device.cu.unlock()
except UnlockError as err:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to unlock configuration: {err}')
try:
os.remove(config_file_location)
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to delete {config_file_location}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
device.close()
return
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unlocking the configuration')
try:
device.cu.unlock()
except UnlockError as err:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to unlock configuration: {err}')
device.close()
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Removing local config file')
try:
os.remove(config_file_location)
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Config removed.')
except Exception as e:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Unable to delete {config_file_location}')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{str(e)}')
else:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Config was not found for {facts["device_sn"]}')
r.hmset(facts['device_sn'], {'configpy-node_error': 'Could not locate config in configs/'})
r.hmset(facts['device_sn'], {'config': 'repo error'})
return
def gather_basic_facts(device, r, sio):
"""
Using the provided Junos Device object, retrieve basic facts about the device.
Parameters
----------
device : Device
The Junos device instance
r : redis.Redis
Redis client used to store device state
sio : socketio.Client
Socket.IO client used to emit log events
Returns
-------
dict
A collection of basic facts about the device that will be stored in the database.
"""
# -----------------------------------------------------
# get information from the provided device facts object
# -----------------------------------------------------
basic_facts = dict()
for x, y in device.facts.items():
basic_facts[f'{x}'] = f'{y}'
# TODO: Most of this code below should be reworked.
basic_facts['device_sn'] = device.facts['serialnumber']
if device.facts['hostname'] is None:
basic_facts['hostname'] = 'no_hostname'
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'No hostname')
else:
basic_facts['hostname'] = device.facts['hostname']
if device.facts['srx_cluster']:
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'Device: SRX Cluster!')
basic_facts['srx_cluster'] = 'True'
basic_facts['os_version'] = device.facts['version_RE0']
try:
basic_facts['device_model'] = device.facts['model_info']['node0']
except (KeyError, TypeError):
basic_facts['device_model'] = 'error'
try:
basic_facts['hostname'] = device.facts['hostname_info']['node0']
except (KeyError, TypeError):
basic_facts['hostname'] = 'error'
else:
# Gather general facts
basic_facts['os_version'] = device.facts['version']
basic_facts['device_model'] = device.facts['model']
# FIXME - Likely a better way to handle this error if contact is not found.
# Get SNMP contact ID:
try:
# Look for SNMP contact in config.
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'Attempting to find SNMP in config..')
snmp_config = device.rpc.get_config(filter_xml='snmp/contact')
found_snmp_value = etree.tostring(snmp_config, encoding='unicode')
parsed_snmp_value = xmltodict.parse(found_snmp_value)
#pprint(parsed_snmp_value['configuration']['snmp']['contact'])
#config = device.rpc.get_config(filter_xml='snmp', options={'format':'json'})
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'{parsed_snmp_value["configuration"]["snmp"]["contact"]}')
basic_facts['cid'] = parsed_snmp_value['configuration']['snmp']['contact']
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'CID saved to redis db')
except Exception as e:
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'No CID found in the device config')
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'{str(e)}')
# A KeyError below means the SNMP contact is not defined in the config.
try:
# Let's see if ConfigPy set a bootstrap SNMP client ID in the Redis DB.
# This happens when someone generates a config with ConfigPy
# and then pushes it to the GitLab repo.
# When pushed, the config is assigned to a client contact ID (folder) in GitLab.
# If a bootstrap ID is detected in the Redis DB, we copy it to the Redis CID value.
# In a way, this acts as a sort of bootstrap: without it, we have no way to find
# the config file for a new (out-of-box) device.
# Get redis keys for this device
redis_info = r.hgetall(device.facts['serialnumber'])
# Get the ztp, if one exists. Else, hit the KeyError exception.
ztp = redis_info[b'ztp'].decode("utf-8")
# If it's found, let's make that the new cid value.
if ztp:
basic_facts['cid'] = str(ztp)
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'found ZTP flag!')
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'setting CID value to {str(ztp)}')
else:
# Shouldn't be used, but just in case.
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'No ZTP flag set in redis')
basic_facts['cid'] = 'none'
except KeyError:
# ztp isn't a valid key, so no ztp.
new_log_event(sio=sio, device_sn=basic_facts["device_sn"], event=f'Exception..setting CID to none.')
basic_facts['cid'] = 'none'
# -------------------------------------------------------------------------------
# need to do a route lookup using the outbound ssh config to determine the actual
# management interface used to reach this service. For now, use the first
# server ip-address (name). It is possible that the device could be configured
# with multiple ossh clients. If we need to support this use-case, then we will
# need to add additional checks for specific client name.
# -------------------------------------------------------------------------------
config = device.rpc.get_config(filter_xml='system/services/outbound-ssh')
servers = config.xpath('.//servers/name')
server_ipaddr = servers[0].text
# -----------------------------------------------------------------------------------
# get mgmt_interface value from the route lookup. The route lookup will give us the
# logical interface name, which we will also need for finding the assigned ip-address
# -----------------------------------------------------------------------------------
resp = device.rpc.get_route_information(destination=server_ipaddr)
if_name = resp.xpath('.//via | .//nh-local-interface')[0].text
basic_facts['mgmt_interface'] = if_name.partition('.')[0] # physical interface
# -------------------------------------------------------------
# get mgmt_ipaddr from the if_name obtained by the route lookup
# -------------------------------------------------------------
if_info = device.rpc.get_interface_information(interface_name=if_name, terse=True)
basic_facts['mgmt_ipaddr'] = if_info.findtext('.//ifa-local').partition('/')[0].strip()
# ----------------------------------------------------------
# get mgmt_macaddr value assigned to the management interface
# ----------------------------------------------------------
resp = device.rpc.get_interface_information(interface_name=basic_facts['mgmt_interface'], media=True)
found = resp.findtext('.//current-physical-address').strip()
basic_facts['mgmt_macaddr'] = found
return basic_facts
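# For illustration, a returned dict might look like this (all values assumed):
# {'device_sn': 'CV0123456789', 'hostname': 'srx-branch-01', 'os_version': '19.4R3.11',
#  'device_model': 'SRX345', 'cid': 'acme-networks', 'mgmt_interface': 'ge-0/0/0',
#  'mgmt_ipaddr': '10.0.0.5', 'mgmt_macaddr': '00:11:22:33:44:55', ...}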
def check_backup_firmware(device, facts, sio, r):
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Setting connection timeout to 300 seconds.')
device.timeout = 300
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Gathering snapshot details.')
result = device.rpc.get_snapshot_information(slice='alternate', media='internal')
result = etree.tostring(result, encoding='unicode')
parsed_snmp_value = xmltodict.parse(result)
sw_version = []
for x in parsed_snmp_value['snapshot-information']['software-version']:
if x['package']['package-name'] == 'junos':
sw_version.append(x['package']['package-version'])
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Found: {sw_version}')
if len(sw_version) == 2 and sw_version[0] != sw_version[1]:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Mismatch detected!')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Updating backup firmware...')
r.hmset(facts['device_sn'], {'config': f'updating snapshot'})
result = device.rpc.request_snapshot(slice='alternate')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Result: {result}')
def xml_to_dict(var):
result = etree.tostring(var, encoding='unicode')
final_dict = xmltodict.parse(result)
return final_dict
def find_checksum(dev, destination=None):
result = dev.rpc.get_checksum_information(path=f'{destination}')
parsed_checksum_value = xml_to_dict(result)
found_checksum = parsed_checksum_value['checksum-information']['file-checksum'].get('checksum', None)
return found_checksum
def upload_file(dev, sio, facts, source=None, destination=None, srx_firmware_checksum=None, **kwargs):
found_checksum = None
if source and destination:
# Check to see if file exists on the device.
try:
result = dev.rpc.file_list(path=f'{destination}')
result = xml_to_dict(result)
if result['directory-list']['directory']['file-information']['file-name'] == destination:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'File exists. Checking file checksum...')
found_checksum = find_checksum(dev, destination)
if found_checksum and found_checksum == srx_firmware_checksum:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Checksum matched!')
return 0
else:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Existing file checksum does not match.')
except Exception as e:
pass
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Uploading file... Please be patient...')
dev.rpc.file_copy(source=f'{source}', destination=f'{destination}')
found_checksum = find_checksum(dev, destination)
if found_checksum and found_checksum == srx_firmware_checksum:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Upload complete. Checksum matched!')
return 0
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'ERROR: Upload complete, but checksum does not match!')
return 1
else:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'ERROR: Upload failed. Invalid source or destination.')
return 1
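# A hypothetical call for illustration (paths and checksum are assumed, not from
# this deployment); it returns 0 when the on-box file already matches the checksum:
# upload_file(dev, sio, facts,
#             source='/firmware/junos-srxsme-19.4R3.11.tgz',
#             destination='/var/tmp/junos-srxsme-19.4R3.11.tgz',
#             srx_firmware_checksum='0f343b0931126a20f133d67c2b018a3b')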
def update_cluster(dev, software_location, srx_firmware, r, facts, sio, srx_firmware_checksum):
# Upgrades can take a while. Increasing timeout to 15 minutes.
dev.timeout = 900
# Default result to 1. It must become 0 for the upgrade to proceed.
result = 1
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Searching for firmware and uploading if needed.')
result = upload_file(dev, sio, facts, source=f'{software_location}{srx_firmware}', destination=f'/var/tmp/{srx_firmware}', srx_firmware_checksum=srx_firmware_checksum)
# If checksum was found and it matched the provided checksum, then perform ISSU.
if result == 0:
# FIXME: I would like to use the below, but it doesn't return any useful results...
# result = dev.rpc.request_package_in_service_upgrade(package_name=f'/var/tmp/{srx_firmware}', no_sync=True)
r.hmset(facts['device_sn'], {'config': f'running ISSU'})
result = dev.cli(f'request system software in-service-upgrade {srx_firmware} no-sync')
if 'ISSU not allowed' in result:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'ERROR: {result}')
return 1
else:
return 0
# If any of the above is invalid, send an error syslog.
elif result == 1:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'ERROR: Aborting upgrade.')
return 1
def update_firmware(device, software_location, srx_firmware_url, r, facts, sio):
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Starting software update...')
r.hmset(facts['device_sn'], {'firmware_update': 'Starting software update...'})
def update_progress(device, report):
# log the progress of the installation process
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{report}')
r.hmset(facts['device_sn'], {'firmware_update': f'{report}'})
package = f'{software_location}{srx_firmware_url}'
remote_path = '/var/tmp'
validate = True
# Create an instance of SW
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Creating an instance of SW')
device.bind(sw=SW)
try:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Running installer...')
r.expire(facts['device_sn'], 2400)
r.hmset(facts['device_sn'], {'firmware_update': 'Running installer...'})
status = device.sw.install(package=package, remote_path=remote_path, progress=update_progress, validate=validate, timeout=2400, checksum_timeout=400)
except Exception as err:
msg = f'Unable to install software, {err}'
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{msg}')
status = False
return
if status is True:
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Software installation complete.')
rsp = device.sw.reboot(all_re=False)
r.hmset(facts['device_sn'], {'firmware_update': 'Rebooting'})
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'Upgrade pending reboot cycle, please be patient.')
new_log_event(sio=sio, device_sn=facts["device_sn"], event=f'{rsp}')
return
class OutboundSSHServer(object):
NAME = 'outbound-ssh-server'
DEFAULT_LISTEN_BACKLOG = 10
logger = logger
def __init__(self, ipaddr, port, login_user, login_password, configpy_url, redis_url, repo_uri, repo_auth_token, software_location, srx_firmware, srx_firmware_checksum, on_device=None, on_error=None, unittest=None):
"""
Parameters
----------
ipaddr : str
The server IP address
port : int
The server port to accept requests
login_user : str
The device login user name
login_password : str
The device login password
on_device : callback
User callback function that is invoked when the server has established a NETCONF
session with the remote device and has retrieved basic facts. The callback takes two
parameters, the PyEZ device instance and a dictionary of gathered basic facts, for example:
>>> import json
>>>
>>> def dump_facts(device, facts):
>>> print("GOT FACTS: ", json.dumps(facts, indent=3))
on_error : callback
User callback function that is invoked when an error occurs while attempting to
connect or communicate with the remote device. The callback takes two parameters, the PyEZ device
instance (could be None) and the error exception instance, for example:
>>> import json
>>>
>>> def dump_error(device, exc):
>>> print("GOT ERROR: ", str(exc))
"""
self.thread = None
self.socket = None
self.login_user = login_user
self.login_password = login_password
self.configpy_url = configpy_url
self.redis_url = redis_url
self.repo_uri = repo_uri
self.software_location = software_location
self.srx_firmware = srx_firmware
self.srx_firmware_checksum = srx_firmware_checksum
self.repo_auth_token = repo_auth_token
self.bind_ipaddr = ipaddr
self.bind_port = int(port)
self.listen_backlog = OutboundSSHServer.DEFAULT_LISTEN_BACKLOG
self._callbacks = dict()
self.on_device = on_device # callable also provided at :meth:`start`
self.on_error = on_error # callable also provided at :meth:`start`
self.r = redis.Redis(host=redis_url, port=6379, db=0)
self.sio = socketio.Client()
self.sio.connect(configpy_url)
# ----------------------------------------------------------------------------------------------------------------
# PROPERTIES
# ----------------------------------------------------------------------------------------------------------------
@property
def name(self):
return self.__class__.NAME
@property
def on_device(self):
def no_op(device, facts):
pass
return self._callbacks['on_device'] or no_op
@on_device.setter
def on_device(self, callback):
if callback and not callable(callback):
raise ValueError('callback is not callable')
self._callbacks['on_device'] = callback
@property
def on_error(self):
def no_op(device, exc):
pass
return self._callbacks['on_error'] or no_op
@on_error.setter
def on_error(self, callback):
if callback and not callable(callback):
raise ValueError('callback is not callable')
self._callbacks['on_error'] = callback
# ----------------------------------------------------------------------------------------------------------------
# PRIVATE METHODS
# ----------------------------------------------------------------------------------------------------------------
def _setup_server_socket(self):
s_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s_sock.bind((self.bind_ipaddr, self.bind_port))
s_sock.listen(self.listen_backlog)
self.socket = s_sock
def _server_thread(self):
"""
This is the running thread target for the outbound-ssh server. The purpose of this thread
is to await inbound connections from the Junos devices and then spawn a specific thread for that device
for future processing.
"""
try:
self._setup_server_socket()
except Exception as exc:
new_log_event(event=f'{self.name}: failed to setup socket: {str(exc)}')
return
while True:
# await a device to make an outbound connection. The socket accept() returns a tuple
# (socket, (device ipaddr, device port)). create a new thread to process the inbound with
# this information
try:
in_sock, (in_addr, in_port) = self.socket.accept()
except ConnectionAbortedError:
# this triggers when the server socket is closed by the shutdown() method
new_log_event(event=f'{self.name} shutting down')
return
in_str = f'{in_addr}:{in_port}'
dev_name = f'device-{in_str}'
new_log_event(event=f'{self.name}: accepted connection from {in_str}')
# spawn a device-specific thread for further processing
try:
Thread(name=dev_name, target=self._device_thread,
kwargs=dict(in_sock=in_sock, in_addr=in_addr, in_port=in_port)).start()
except RuntimeError as exc:
new_log_event(event=f'{self.name}: ERROR: failed to start processing {in_addr}: {str(exc)}')
in_sock.close()
continue
# NOT REACHABLE
def _device_thread(self, in_sock, in_addr, in_port):
"""
This is a thread target function that is launched by the OSSH service. The purpose of this function
is to make a NETCONF connection back to the device, gather basic facts, and store them into the database.
If all goes well, the `facts` field in the database will contain the information about the device. If
all does not go well, then there is an "error" field within the facts that the caller can example. The
most likely error reason is the provided user name and password values are not correct.
Parameters
----------------
in_sock: socket
the socket instance from the outbound connection.
in_addr: str
the Junos device management IP address that connected to the OSSH service.
in_port: int
the TCP source port of the outbound connection.
"""
via_str = f'{in_addr}:{in_port}'
sock_fd = in_sock.fileno()
# attempt to add this device entry to the database; the unique ID is the IP address.
# it is AOK if the entry already exists as the device-thread will simply update the record with the
# information retrieved
try:
new_log_event(sio=self.sio, event=f'establishing netconf to device via: {via_str}')
dev = Device(sock_fd=sock_fd, user=self.login_user, password=self.login_password)
dev.open()
except ConnectError as exc:
new_log_event(sio=self.sio, event=f'Connection error to device via {via_str}: {exc.msg}')
in_sock.close()
return
except Exception as exc:
new_log_event(sio=self.sio, event=f'unable to establish netconf to device via {via_str}: {str(exc)}')
in_sock.close()
return
# #######################################
# Begin Working With Device
# #######################################
try:
# #######################################
# Gather Device Facts
# #######################################
new_log_event(sio=self.sio, event=f'Gathering basic facts from device via: {via_str}')
facts = gather_basic_facts(dev, self.r, self.sio)
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Finished gathering facts.')
try:
facts = convert(facts)
self.r.hmset(facts['device_sn'], facts)
self.r.hmset(facts['device_sn'], {'Last seen': get_time()})
self.r.expire(facts['device_sn'], 300)
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'database for {facts["device_sn"]} will expire in 5 min.')
except Exception as e:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'{str(e)}')
# #######################################
# Firmware Check / Update
# #######################################
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event='***** TASK : Starting firmware audit...', configpy_url=self.configpy_url)
# Check if SRX 300, 320, 340, or 345:
if 'SRX3' in facts['device_model']:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Device type: SRX')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Device firmware: {facts["os_version"]}')
# Check for a firmware mismatch:
if facts['os_version'] not in self.srx_firmware and '.tgz' in self.srx_firmware:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Desired firmware: {self.srx_firmware}')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Firmware mismatch detected')
# Start firmware upgrade if device is not in an SRX cluster.
if facts['srx_cluster'] == 'False':
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Device is not in a cluster.')
self.r.hmset(facts['device_sn'], {'config': 'updating firmware'})
self.r.expire(facts['device_sn'], 900)
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Set device DB timeout for 15 min while update is performed.')
update_firmware(dev, self.software_location, self.srx_firmware, self.r, facts, self.sio)
dev.close()
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Disconnecting - Device needs to reboot.')
return
elif facts['srx_cluster'] == 'True':
# Attempt to perform ISSU on the SRX cluster
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event='Starting Cluster firmware audit...', configpy_url=self.configpy_url)
try:
status = update_cluster(dev, self.software_location, self.srx_firmware, self.r, facts, self.sio, self.srx_firmware_checksum)
except Exception as e:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'ERROR: {str(e)}')
return
if status == 0:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Disconnecting device.')
dev.close()
elif status == 1:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'ERROR: ISSU firmware upgrade failed.')
else:
# This code should not be reached..
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'ERROR: unreachable code reached!')
pass
elif facts['os_version'] in self.srx_firmware and '.tgz' in self.srx_firmware:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Desired firmware: {self.srx_firmware}')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Firmware is compliant.')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Firmware audit complete.')
# #######################################
# Backup Firmware Check / Update
# #######################################
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Starting Backup Firmware audit...')
try:
check_backup_firmware(dev, facts, self.sio, self.r)
except Exception as e:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Error: {str(e)}')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Backup Firmware audit complete.')
# #######################################
# Configure Clustering
# #######################################
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Starting ZTP cluster audit')
device_db = self.r.hgetall(facts["device_sn"])
device_values = {}
for x, y in device_db.items():
device_values[x.decode("utf-8")] = y.decode("utf-8")
if 'ztp_cluster_node' in device_values:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'ZTP cluster flag set!')
cluster_srx(dev=dev, sio=self.sio, facts=facts, ztp_cluster_node=device_values['ztp_cluster_node'])
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : ZTP Cluster audit complete')
try:
self.r.hdel(facts['device_sn'], 'ztp_cluster_node')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Successfully removed "ztp_cluster_node"')
except Exception as e:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Failed to remove "ztp_cluster_node" key.')
dev.close()
self.r.hmset(facts['device_sn'], {'config': 'clustering'})
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Disconnecting - Device needs to reboot.')
return
else:
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'ZTP Cluster flag not set.')
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : ZTP Cluster audit complete')
# #######################################
# Config Check / Update
# #######################################
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Starting config audit...')
update_config(dev, facts, self.r, self.repo_uri, self.repo_auth_token, sio=self.sio)
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'***** TASK : Config audit complete')
# call user on-device callback
# self.on_device(dev, facts)
new_log_event(sio=self.sio, device_sn=facts["device_sn"], event=f'Completed device with management IP address: {facts["mgmt_ipaddr"]}')
dev.close()
new_log_event(sio=self.sio, event=f'{"- " * 30}')
except Exception as exc:
error = f'ERROR: unable to process device {in_addr}:{in_port}: {str(exc)}'
new_log_event(sio=self.sio, event=f'{error}')
if self.on_error:
self.on_error(dev, exc)
finally:
in_sock.close()
return
# ----------------------------------------------------------------------------------------------------------------
# PUBLIC METHODS
# ----------------------------------------------------------------------------------------------------------------
def start(self, on_device=None, on_error=None):
"""
Start the ossh-server background thread.
Examples
--------
Start the server, will use the existing server attributes.
>>> ok, msg = server.start()
Start the server, provide a new `on_device` callback.
>>> import json
>>>
>>> def dump_facts(device, facts):
>>> print("GOT FACTS: ", json.dumps(facts, indent=3))
>>>
>>> ok, msg = server.start(on_device=dump_facts)
Parameters
----------
on_device : callback
User callback function that is invoked when the server has established a NETCONF
session with the remote device and has retrieved basic facts.
on_error : callback
User callback function that is invoked when an error occurs while attempting to
connect or communicate with the remote device.
Returns
-------
tuple
ok : bool
True if started ok, False otherwise
msg : str
message string
"""
if self.socket:
msg = f'{self.name} already running'
new_log_event(event=f'{msg}')
return False, msg
if on_device:
self.on_device = on_device
if on_error:
self.on_error = on_error
new_log_event(event=f'{self.name}: starting on {self.bind_ipaddr}:{self.bind_port}')
try:
self.thread = Thread(name=self.name, target=self._server_thread)
self.thread.start()
except Exception as exc:
msg = f'{self.name} unable to start: %s' % str(exc)
new_log_event(event=f'{msg}')
return False, msg
msg = f'{self.name}: started'
new_log_event(event=f'{msg}')
return True, msg
def stop(self):
'''
Stops the ossh-server thread.
Examples
--------
>>> server.stop()
'''
self.socket.close()
self.thread = None
self.socket = None
new_log_event(event=f'{self.name}: stopped')
|
02_breakout_es.py | #!/usr/bin/env python3
import gym
import ptan
import time
import argparse
import numpy as np
import collections
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import multiprocessing as mp
from torch import optim
from tensorboardX import SummaryWriter
NOISE_STD = 0.05
LEARNING_RATE = 0.001
PROCESSES_COUNT = 3
ITERS_PER_UPDATE = 10
MAX_ITERS = 100000
# result item from the worker to master. Fields:
# 1. random seed used to generate noise
# 2. reward obtained from the positive noise
# 3. reward obtained from the negative noise
# 4. total amount of steps done
RewardsItem = collections.namedtuple('RewardsItem', field_names=['seed', 'pos_reward', 'neg_reward', 'steps'])
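# For illustration, a worker reporting one mirrored-noise evaluation might emit
# (all values assumed): RewardsItem(seed=1234, pos_reward=21.0, neg_reward=-3.0, steps=1748)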
class VBN(nn.Module):
"""
Virtual batch normalization (incomplete stub; not used below)
"""
def __init__(self, n_feats, epsilon=1e-5, batches_to_train=1):
super(VBN, self).__init__()
self.epsilon = epsilon
self.means = torch.zeros(n_feats)
class Net(nn.Module):
def __init__(self, input_shape, n_actions):
super(Net, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=3, stride=2),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=2),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=2),
nn.ReLU()
)
conv_out_size = self._get_conv_out(input_shape)
self.fc = nn.Sequential(
nn.Linear(conv_out_size, 256),
nn.ReLU(),
nn.Linear(256, n_actions),
nn.Softmax(dim=1)
)
def _get_conv_out(self, shape):
o = self.conv(Variable(torch.zeros(1, *shape)))
return int(np.prod(o.size()))
def forward(self, x):
fx = x.float() / 256
conv_out = self.conv(fx).view(fx.size()[0], -1)
return self.fc(conv_out)
def evaluate(env, net, cuda=False):
obs = env.reset()
reward = 0.0
steps = 0
while True:
obs_v = ptan.agent.default_states_preprocessor([obs], cuda=cuda, volatile=True)
act_prob = net(obs_v)
acts = act_prob.max(dim=1)[1]
obs, r, done, _ = env.step(acts.data.cpu().numpy()[0])
reward += r
steps += 1
if done:
break
return reward, steps
def sample_noise(net, cuda=False):
res = []
neg = []
for p in net.parameters():
noise_t = torch.from_numpy(np.random.normal(size=p.data.size()).astype(np.float32))
if cuda:
noise_t = noise_t.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+
res.append(noise_t)
neg.append(-noise_t)
return res, neg
def eval_with_noise(env, net, noise, noise_std, cuda=False):
# old_params = net.state_dict()
for p, p_n in zip(net.parameters(), noise):
p.data += noise_std * p_n
r, s = evaluate(env, net, cuda=cuda)
for p, p_n in zip(net.parameters(), noise):
p.data -= noise_std * p_n
# net.load_state_dict(old_params)
return r, s
def compute_ranks(x):
"""
Returns ranks in [0, len(x))
Note: This is different from scipy.stats.rankdata, which returns ranks in [1, len(x)].
"""
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks
def compute_centered_ranks(x):
y = compute_ranks(x.ravel()).reshape(x.shape).astype(np.float32)
y /= (x.size - 1)
y -= .5
return y
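# A small worked example of the rank transform above (input values assumed):
# compute_ranks(np.array([3.0, 1.0, 2.0]))          -> array([2, 0, 1])
# compute_centered_ranks(np.array([3.0, 1.0, 2.0])) -> array([ 0.5, -0.5,  0. ], dtype=float32)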
def train_step(optimizer, net, batch_noise, batch_reward, writer, step_idx, noise_std):
weighted_noise = None
norm_reward = compute_centered_ranks(np.array(batch_reward))
for noise, reward in zip(batch_noise, norm_reward):
if weighted_noise is None:
weighted_noise = [reward * p_n for p_n in noise]
else:
for w_n, p_n in zip(weighted_noise, noise):
w_n += reward * p_n
m_updates = []
optimizer.zero_grad()
for p, p_update in zip(net.parameters(), weighted_noise):
update = p_update / (len(batch_reward) * noise_std)
p.grad = Variable(-update)
m_updates.append(torch.norm(update))
writer.add_scalar("update_l2", np.mean(m_updates), step_idx)
optimizer.step()
def make_env():
env = gym.make("PongNoFrameskip-v4")
return ptan.common.wrappers.wrap_dqn(env)
def worker_func(worker_id, params_queue, rewards_queue, cuda, noise_std):
env = make_env()
net = Net(env.observation_space.shape, env.action_space.n)
net.eval()
if cuda:
net.cuda()
while True:
params = params_queue.get()
if params is None:
break
net.load_state_dict(params)
for _ in range(ITERS_PER_UPDATE):
seed = np.random.randint(low=0, high=65535)
np.random.seed(seed)
noise, neg_noise = sample_noise(net, cuda=cuda)
pos_reward, pos_steps = eval_with_noise(env, net, noise, noise_std, cuda=cuda)
neg_reward, neg_steps = eval_with_noise(env, net, neg_noise, noise_std, cuda=cuda)
rewards_queue.put(RewardsItem(seed=seed, pos_reward=pos_reward,
neg_reward=neg_reward, steps=pos_steps+neg_steps))
pass
if __name__ == "__main__":
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action='store_true', help="Enable CUDA mode")
parser.add_argument("--lr", type=float, default=LEARNING_RATE)
parser.add_argument("--noise-std", type=float, default=NOISE_STD)
parser.add_argument("--iters", type=int, default=MAX_ITERS)
args = parser.parse_args()
writer = SummaryWriter(comment="-breakout-es_lr=%.3e_sigma=%.3e" % (args.lr, args.noise_std))
env = make_env()
net = Net(env.observation_space.shape, env.action_space.n)
print(net)
params_queues = [mp.Queue(maxsize=1) for _ in range(PROCESSES_COUNT)]
rewards_queue = mp.Queue(maxsize=ITERS_PER_UPDATE)
workers = []
for idx, params_queue in enumerate(params_queues):
proc = mp.Process(target=worker_func, args=(idx, params_queue, rewards_queue, args.cuda, args.noise_std))
proc.start()
workers.append(proc)
print("All started!")
optimizer = optim.Adam(net.parameters(), lr=args.lr)
for step_idx in range(args.iters):
# broadcasting network params
params = net.state_dict()
for q in params_queues:
q.put(params)
# waiting for results
t_start = time.time()
batch_noise = []
batch_reward = []
results = 0
batch_steps = 0
batch_steps_data = []
while True:
while not rewards_queue.empty():
reward = rewards_queue.get_nowait()
np.random.seed(reward.seed)
noise, neg_noise = sample_noise(net)
batch_noise.append(noise)
batch_reward.append(reward.pos_reward)
batch_noise.append(neg_noise)
batch_reward.append(reward.neg_reward)
results += 1
batch_steps += reward.steps
batch_steps_data.append(reward.steps)
# print("Result from %d: %s, noise: %s" % (
# idx, reward, noise[0][0, 0, 0:1]))
if results == PROCESSES_COUNT * ITERS_PER_UPDATE:
break
time.sleep(0.01)
dt_data = time.time() - t_start
m_reward = np.mean(batch_reward)
train_step(optimizer, net, batch_noise, batch_reward, writer, step_idx, args.noise_std)
writer.add_scalar("reward_mean", m_reward, step_idx)
writer.add_scalar("reward_std", np.std(batch_reward), step_idx)
writer.add_scalar("reward_max", np.max(batch_reward), step_idx)
writer.add_scalar("batch_episodes", len(batch_reward), step_idx)
writer.add_scalar("batch_steps", batch_steps, step_idx)
speed = batch_steps / (time.time() - t_start)
writer.add_scalar("speed", speed, step_idx)
dt_step = time.time() - t_start - dt_data
print("%d: reward=%.2f, speed=%.2f f/s, data_gather=%.3f, train=%.3f, steps_mean=%.2f, min=%.2f, max=%.2f, steps_std=%.2f" % (
step_idx, m_reward, speed, dt_data, dt_step, np.mean(batch_steps_data),
np.min(batch_steps_data), np.max(batch_steps_data), np.std(batch_steps_data)))
for worker, p_queue in zip(workers, params_queues):
p_queue.put(None)
worker.join()
|
helpers.py | """
This file contains various helpers and basic variables for the test suite.
Defining them here rather than in conftest.py avoids issues with circular imports
between test/conftest.py and test/backend/<backend>/conftest.py files.
"""
import functools
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
from abc import ABCMeta, abstractmethod
from pathlib import Path
from libqtile import command, config, ipc, layout
from libqtile.confreader import Config
from libqtile.core.manager import Qtile
from libqtile.lazy import lazy
from libqtile.log_utils import init_log
from libqtile.resources import default_config
# the sizes for outputs
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
max_sleep = 5.0
sleep_time = 0.1
class Retry:
def __init__(self, fail_msg='retry failed!', ignore_exceptions=(),
dt=sleep_time, tmax=max_sleep, return_on_fail=False):
self.fail_msg = fail_msg
self.ignore_exceptions = ignore_exceptions
self.dt = dt
self.tmax = tmax
self.return_on_fail = return_on_fail
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
tmax = time.time() + self.tmax
dt = self.dt
ignore_exceptions = self.ignore_exceptions
while time.time() <= tmax:
try:
return fn(*args, **kwargs)
except ignore_exceptions:
pass
except AssertionError:
break
time.sleep(dt)
dt *= 1.5
if self.return_on_fail:
return False
else:
raise AssertionError(self.fail_msg)
return wrapper
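# A minimal usage sketch (the polled function is hypothetical, not part of this suite):
# @Retry(fail_msg='window never appeared', ignore_exceptions=(ValueError,))
# def wait_for_window(client):
#     if not client.windows():
#         raise ValueError('no windows yet')
#     return True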
class BareConfig(Config):
auto_fullscreen = True
groups = [
config.Group("a"),
config.Group("b"),
config.Group("c"),
config.Group("d")
]
layouts = [
layout.stack.Stack(num_stacks=1),
layout.stack.Stack(num_stacks=2)
]
floating_layout = default_config.floating_layout
keys = [
config.Key(
["control"],
"k",
lazy.layout.up(),
),
config.Key(
["control"],
"j",
lazy.layout.down(),
),
]
mouse = []
screens = [config.Screen()]
follow_mouse_focus = False
reconfigure_screens = False
class Backend(metaclass=ABCMeta):
"""A base class to help set up backends passed to TestManager"""
def __init__(self, env, args=()):
self.env = env
self.args = args
def create(self):
"""This is used to instantiate the Core"""
return self.core(*self.args)
def configure(self, manager):
"""This is used to do any post-startup configuration with the manager"""
pass
@abstractmethod
def fake_click(self, x, y):
"""Click at the specified coordinates"""
pass
@abstractmethod
def get_all_windows(self):
"""Get a list of all windows in ascending order of Z position"""
pass
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
if ok is not None and not ok():
raise AssertionError()
ipc_client = ipc.Client(socket_path)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
client = command.client.InteractiveCommandClient(ipc_command)
val = client.status()
if val == 'OK':
return True
return False
class TestManager:
"""Spawn a Qtile instance
Set up a Qtile server instance on the given display, with the given socket
and log files. The Qtile server must be started, and then stopped when it
is done. Windows can be spawned for the Qtile instance to interact with
via the various `.test_*` methods.
"""
def __init__(self, backend, debug_log):
self.backend = backend
self.log_level = logging.DEBUG if debug_log else logging.INFO
self.backend.manager = self
self.proc = None
self.c = None
self.testwindows = []
def __enter__(self):
"""Set up resources"""
self._sockfile = tempfile.NamedTemporaryFile()
self.sockfile = self._sockfile.name
return self
def __exit__(self, exc_type, exc_value, exc_tb):
"""Clean up resources"""
self.terminate()
self._sockfile.close()
def start(self, config_class, no_spawn=False, state=None):
rpipe, wpipe = multiprocessing.Pipe()
def run_qtile():
try:
os.environ.pop("DISPLAY", None)
os.environ.pop("WAYLAND_DISPLAY", None)
kore = self.backend.create()
os.environ.update(self.backend.env)
init_log(self.log_level, log_path=None, log_color=False)
Qtile(
kore,
config_class(),
socket_path=self.sockfile,
no_spawn=no_spawn,
state=state
).loop()
except Exception:
wpipe.send(traceback.format_exc())
self.proc = multiprocessing.Process(target=run_qtile)
self.proc.start()
# First, wait for socket to appear
if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
ipc_client = ipc.Client(self.sockfile)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
self.c = command.client.InteractiveCommandClient(ipc_command)
self.backend.configure(self)
return
if rpipe.poll(0.1):
error = rpipe.recv()
raise AssertionError("Error launching qtile, traceback:\n%s" % error)
raise AssertionError("Error launching qtile")
def create_manager(self, config_class):
"""Create a Qtile manager instance in this thread
This should only be used when it is known that the manager will throw
an error and the returned manager should not be started, otherwise this
will likely block the thread.
"""
init_log(self.log_level, log_path=None, log_color=False)
kore = self.backend.create()
config = config_class()
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
return Qtile(kore, config, socket_path=self.sockfile)
def terminate(self):
if self.proc is None:
print("qtile is not alive", file=sys.stderr)
else:
# try to send SIGTERM and wait up to 10 sec to quit
self.proc.terminate()
self.proc.join(10)
if self.proc.is_alive():
print("Killing qtile forcefully", file=sys.stderr)
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.proc.pid, 9)
self.proc.join()
except OSError:
# The process may have died due to some other error
pass
if self.proc.exitcode:
print("qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
self.proc = None
for proc in self.testwindows[:]:
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
def create_window(self, create, failed=None):
"""
Uses the function `create` to create a window.
Waits until qtile actually maps the window and then returns.
"""
client = self.c
start = len(client.windows())
create()
@Retry(ignore_exceptions=(RuntimeError,), fail_msg='Window never appeared...')
def success():
while failed is None or not failed():
if len(client.windows()) > start:
return True
raise RuntimeError("not here yet")
return success()
def _spawn_window(self, *args):
"""Starts a program which opens a window
Spawns a new subprocess for a command that opens a window, given by the
arguments to this method. Spawns the new process and checks that qtile
maps the new window.
"""
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
proc = None
def spawn():
nonlocal proc
# Ensure the client only uses the test display
env = os.environ.copy()
env.pop("DISPLAY", None)
env.pop("WAYLAND_DISPLAY", None)
env.update(self.backend.env)
proc = subprocess.Popen(args, env=env)
def failed():
if proc.poll() is not None:
return True
return False
self.create_window(spawn, failed=failed)
self.testwindows.append(proc)
return proc
def kill_window(self, proc):
"""Kill a window and check that qtile unmaps it
Kills a window created by calling one of the `self.test*` methods,
ensuring that qtile removes it from the `windows` attribute.
"""
assert proc in self.testwindows, "Given process is not a spawned window"
start = len(self.c.windows())
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
@Retry(ignore_exceptions=(ValueError,))
def success():
if len(self.c.windows()) < start:
return True
raise ValueError('window is still in client list!')
if not success():
raise AssertionError("Window could not be killed...")
def test_window(self, name, floating=False, wm_type="normal"):
"""
Create a simple window in X or Wayland. If `floating` is True then the wmclass
is set to "dialog", which triggers auto-floating based on `default_float_rules`.
`wm_type` can be changed from "normal" to "notification", which creates a window
that not only floats but does not grab focus.
Windows created with this method must have their process killed explicitly, no
matter what type they are.
"""
python = sys.executable
path = Path(__file__).parent / "scripts" / "window.py"
wmclass = "dialog" if floating else "TestWindow"
return self._spawn_window(python, path, "--name", wmclass, name, wm_type)
def test_notification(self, name="notification"):
return self.test_window(name, wm_type="notification")
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError(
"Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens had an attached group."
@Retry(ignore_exceptions=(AssertionError,), fail_msg='Window did not die!')
def assert_window_died(client, window_info):
client.sync()
wid = window_info['id']
assert wid not in set([x['id'] for x in client.windows()])
|
test_runner.py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import numpy as np
import pytest
import tensorrt as trt
from polygraphy import cuda, mod
from polygraphy.backend.trt import (
CreateConfig,
EngineFromNetwork,
NetworkFromOnnxBytes,
Profile,
TrtRunner,
engine_from_network,
network_from_onnx_bytes,
)
from polygraphy.exception import PolygraphyException
from polygraphy.logger import G_LOGGER
from tests.models.meta import ONNX_MODELS
from tests.helper import time_func
class TestLoggerCallbacks(object):
@pytest.mark.parametrize("sev", G_LOGGER.SEVERITY_LETTER_MAPPING.keys())
def test_set_severity(self, sev):
G_LOGGER.severity = sev
class TestTrtRunner(object):
def test_can_name_runner(self):
NAME = "runner"
runner = TrtRunner(None, name=NAME)
assert runner.name == NAME
def test_basic(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
assert runner.is_active
assert runner.owns_engine
assert runner.owns_context
model.check_runner(runner)
assert runner.last_inference_time() is not None
assert not runner.is_active
def test_context(self):
model = ONNX_MODELS["identity"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine.create_execution_context) as runner:
model.check_runner(runner)
assert not runner.owns_engine
assert runner.owns_context
def test_device_buffer_order_matches_bindings(self):
model = ONNX_MODELS["reducable"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine) as runner:
dev_buf_order = list(runner.device_buffers.keys())
for binding, dev_buf_name in zip(engine, dev_buf_order):
assert binding == dev_buf_name
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_shape_output(self):
model = ONNX_MODELS["reshape"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine.create_execution_context) as runner:
model.check_runner(runner)
def test_multithreaded_runners_from_engine(self):
model = ONNX_MODELS["identity"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine) as runner0, TrtRunner(engine) as runner1:
t1 = threading.Thread(target=model.check_runner, args=(runner0,))
t2 = threading.Thread(target=model.check_runner, args=(runner1,))
t1.start()
t2.start()
t1.join()
t2.join()
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.skipif(mod.version(trt.__version__)[0:2] == mod.version("7.2"), reason="Bugged in TRT 7.2")
def test_multiple_profiles(self):
model = ONNX_MODELS["dynamic_identity"]
profile0_shapes = [(1, 2, 1, 1), (1, 2, 1, 1), (1, 2, 1, 1)] # Use min==opt==max to fix shapes in the engine.
profile1_shapes = [(1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)]
profile2_shapes = [(1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)]
network_loader = NetworkFromOnnxBytes(model.loader)
profiles = [
Profile().add("X", *profile0_shapes),
Profile().add("X", *profile1_shapes),
Profile().add("X", *profile2_shapes),
]
config_loader = CreateConfig(profiles=profiles)
with TrtRunner(EngineFromNetwork(network_loader, config_loader)) as runner:
for index, shapes in enumerate([profile0_shapes, profile1_shapes, profile2_shapes]):
runner.set_profile(index)
for shape in shapes:
model.check_runner(runner, {"X": shape})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_empty_tensor_with_dynamic_input_shape_tensor(self):
model = ONNX_MODELS["empty_tensor_expand"]
shapes = [(1, 2, 0, 3, 0), (2, 2, 0, 3, 0), (4, 2, 0, 3, 0)]
network_loader = NetworkFromOnnxBytes(model.loader)
profiles = [Profile().add("new_shape", *shapes)]
config_loader = CreateConfig(profiles=profiles)
with TrtRunner(EngineFromNetwork(network_loader, config_loader)) as runner:
for shape in shapes:
model.check_runner(runner, {"new_shape": shape})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Test not compatible with TRT 6")
@pytest.mark.parametrize(
"names, err",
[
(["fake-input", "x"], "Extra keys in"),
(["fake-input"], "Some keys are missing"),
([], "Some keys are missing"),
],
)
def test_error_on_wrong_name_feed_dict(self, names, err):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
with pytest.raises(PolygraphyException, match=err):
runner.infer({name: np.ones(shape=(1, 1, 2, 2), dtype=np.float32) for name in names})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Test not compatible with TRT 6")
def test_error_on_wrong_dtype_feed_dict(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
with pytest.raises(PolygraphyException, match="unexpected dtype."):
runner.infer({"x": np.ones(shape=(1, 1, 2, 2), dtype=np.int32)})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Test not compatible with TRT 6")
def test_error_on_wrong_shape_feed_dict(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
with pytest.raises(PolygraphyException, match="incompatible shape."):
runner.infer({"x": np.ones(shape=(1, 1, 3, 2), dtype=np.float32)})
@pytest.mark.parametrize("use_view", [True, False]) # We should be able to use DeviceArray in place of DeviceView
def test_device_views(self, use_view):
model = ONNX_MODELS["reducable"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner, cuda.DeviceArray((1,), dtype=np.float32) as x:
x.copy_from(np.ones((1,), dtype=np.float32))
outputs = runner.infer(
{
"X0": x.view() if use_view else x,
"Y0": np.ones((1,), dtype=np.float32),
}
)
assert outputs["identity_out_6"][0] == 2
assert outputs["identity_out_8"][0] == 2
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_no_output_copy(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
inp = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
outputs = runner.infer({"x": inp}, copy_outputs_to_host=False)
assert isinstance(outputs["y"], cuda.DeviceView)
assert np.array_equal(outputs["y"].numpy(), inp)
def test_subsequent_infers_with_different_input_types(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
inp = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
def check(outputs):
assert np.all(outputs["y"] == inp)
check(runner.infer({"x": inp}))
check(runner.infer({"x": cuda.DeviceArray().copy_from(inp)}))
check(runner.infer({"x": inp}))
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("use_view", [True, False]) # We should be able to use DeviceArray in place of DeviceView
def test_device_view_dynamic_shapes(self, use_view):
model = ONNX_MODELS["dynamic_identity"]
profiles = [
Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
]
runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(model.loader), CreateConfig(profiles=profiles)))
with runner, cuda.DeviceArray(shape=(1, 2, 3, 3), dtype=np.float32) as arr:
inp = np.random.random_sample(size=(1, 2, 3, 3)).astype(np.float32)
arr.copy_from(inp)
outputs = runner.infer({"X": cuda.DeviceView(arr.ptr, arr.shape, arr.dtype) if use_view else arr})
assert np.all(outputs["Y"] == inp)
assert outputs["Y"].shape == (1, 2, 3, 3)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported before TRT 8")
def test_cannot_use_device_view_shape_tensor(self):
model = ONNX_MODELS["empty_tensor_expand"]
with TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(model.loader))) as runner, cuda.DeviceArray(
shape=(5,), dtype=np.int32
) as arr:
with pytest.raises(PolygraphyException, match="it must reside in host memory"):
runner.infer({"data": np.ones((2, 0, 3, 0), dtype=np.float32), "new_shape": arr})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.serial
@pytest.mark.parametrize("copy_outputs", [True, False], ids=["output_dtoh", "no_output_copy"])
@pytest.mark.parametrize("copy_inputs", [True, False], ids=["input_htod", "no_input_copy"])
def test_infer_overhead(self, copy_inputs, copy_outputs):
inp = np.ones(shape=(1, 2, 1024, 1024), dtype=np.float32)
dev_inp = cuda.DeviceArray(shape=inp.shape, dtype=inp.dtype).copy_from(inp)
out = np.zeros(shape=(1, 2, 1024, 1024), dtype=np.float32) # Using identity model!
dev_out = cuda.DeviceArray(shape=out.shape, dtype=out.dtype)
stream = cuda.Stream()
model = ONNX_MODELS["dynamic_identity"]
profiles = [
Profile().add("X", (1, 2, 1024, 1024), (1, 2, 1024, 1024), (1, 2, 1024, 1024)),
]
inp_name = list(model.input_metadata.keys())[0]
with engine_from_network(
network_from_onnx_bytes(model.loader), CreateConfig(profiles=profiles)
) as engine, engine.create_execution_context() as context, TrtRunner(context) as runner, dev_inp, dev_out:
# Inference outside the TrtRunner
def infer():
if copy_inputs:
dev_inp.copy_from(inp, stream=stream)
context.execute_async_v2(bindings=[dev_inp.ptr, dev_out.ptr], stream_handle=stream.ptr)
if copy_outputs:
dev_out.copy_to(out, stream=stream)
stream.synchronize()
native_time = time_func(infer)
feed_dict = {inp_name: (inp if copy_inputs else dev_inp)}
runner_time = time_func(
lambda: runner.infer(feed_dict, check_inputs=False, copy_outputs_to_host=copy_outputs)
)
# The overhead should be less than 0.75ms, or the runtime should be within 8%
print("Absolute difference: {:.5g}".format(runner_time - native_time))
print("Relative difference: {:.5g}".format(runner_time / native_time))
assert (runner_time - native_time) < 0.75e-3 or runner_time <= (native_time * 1.08)
|
2_Multiprocessing.py | # 2 Using multi processing Python
import time
import multiprocessing
# Create a simple function that sleeps for 1 second
def do_something():
print('Sleeping 1 second ..')
time.sleep(1)
print('Done Sleeping')
if __name__ == '__main__':
start = time.perf_counter()
# Create 2 processes and select the function as the target
p1 = multiprocessing.Process(target=do_something)
p2 = multiprocessing.Process(target=do_something)
p1.start()
p2.start()
p1.join()
p2.join()
# Stop the counter and show the script runtime
finish = time.perf_counter()
print(f"Finished in {round(finish-start, 2)} second(s)")
|
benchmark.py | from multiprocessing import Process
import psutil
import time
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
def monitor(target, bounds, num_particles, max_iter, costfunc, M):
worker_process = Process(target=target, args=(M, bounds, num_particles, max_iter, costfunc))
worker_process.start()
p = psutil.Process(worker_process.pid)
# log cpu usage of `worker_process` every 10 ms
cpu_percents = [0.0]
start = time.time()
time_at = []
top_prcnt = []
while worker_process.is_alive():
top = psutil.cpu_percent(percpu=True)
top_prcnt.append(top)
cpu_percents.append(p.cpu_percent())
time_at.append(time.time()-start)
time.sleep(0.01)
worker_process.join()
return cpu_percents, time_at, top_prcnt
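# A hypothetical usage sketch (the optimizer target, cost function, and swarm
# parameters are assumed, not defined in this file):
# cpu_percents, time_at, top_prcnt = monitor(pso, bounds=[(-5, 5)] * 2,
#                                            num_particles=20, max_iter=100,
#                                            costfunc=sphere, M=4)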
def multiple_cpu_plot(top_prcnt, time_at, zoom_range=[], step=1):
cols = list()
for i in range(psutil.cpu_count()):
cols.append('cpu_'+str(i+1))
df = pd.DataFrame.from_records(top_prcnt, columns=cols)
df['time_at'] = time_at
fig, ax = plt.subplots(figsize=(20,20), ncols=3, nrows=3)
sns.set_style("dark")
flat_ax = [bx for axs in ax for bx in axs]
for i, axis in enumerate(flat_ax):
if i >= psutil.cpu_count():
break
if zoom_range != []:
axis.set_xticks(np.arange(zoom_range[0], zoom_range[1] + 1, step))
axis.set_xlim(zoom_range[0], zoom_range[1])
axis.set_title('cpu_' + str(i + 1))
for i in range(psutil.cpu_count()):
sns.lineplot(x='time_at', y='cpu_'+str(i+1), data=df, ax=flat_ax[i]) |
node.py | import threading
import time
import utils
from config import cfg
FOLLOWER = 0
CANDIDATE = 1
LEADER = 2
class Node():
def __init__(self, fellow, my_ip):
self.addr = my_ip
self.fellow = fellow
self.lock = threading.Lock()
self.DB = {}
self.log = []
self.staged = None
self.term = 0
self.status = FOLLOWER
self.majority = ((len(self.fellow) + 1) // 2) + 1
self.voteCount = 0
self.commitIdx = 0
self.timeout_thread = None
self.init_timeout()
# increment only when we are candidate and receive a positive vote
# change status to LEADER and start heartbeat as soon as we reach majority
def incrementVote(self):
self.voteCount += 1
if self.voteCount >= self.majority:
print(f"{self.addr} becomes the leader of term {self.term}")
self.status = LEADER
self.startHeartBeat()
# vote for myself, increase term, change status to candidate
# reset the timeout and start sending request to followers
def startElection(self):
self.term += 1
self.voteCount = 0
self.status = CANDIDATE
self.init_timeout()
self.incrementVote()
self.send_vote_req()
# ------------------------------
# ELECTION TIME CANDIDATE
# spawn threads to request vote for all followers until get reply
def send_vote_req(self):
# TODO: use map later for better performance
# we keep requesting votes from the addresses that haven't voted yet
# until everyone has voted
# or I am the leader
for voter in self.fellow:
threading.Thread(target=self.ask_for_vote,
args=(voter, self.term)).start()
# request vote to other servers during given election term
def ask_for_vote(self, voter, term):
# need to include self.commitIdx, only up-to-date candidate could win
message = {
"term": term,
"commitIdx": self.commitIdx,
"staged": self.staged
}
route = "vote_req"
while self.status == CANDIDATE and self.term == term:
reply = utils.send(voter, route, message)
if reply:
choice = reply.json()["choice"]
# print(f"RECEIVED VOTE {choice} from {voter}")
if choice and self.status == CANDIDATE:
self.incrementVote()
elif not choice:
# they declined because either I'm out-of-date or not newest term
# update my term and terminate the vote_req
term = reply.json()["term"]
if term > self.term:
self.term = term
self.status = FOLLOWER
# fix out-of-date needed
break
# ------------------------------
# ELECTION TIME FOLLOWER
# some other server is asking
def decide_vote(self, term, commitIdx, staged):
# new election
# decline all non-up-to-date candidate's vote request as well
# but update term all the time, not reset timeout during decision
# also vote for someone that has our staged version or a more updated one
if self.term < term and self.commitIdx <= commitIdx and (
staged or (self.staged == staged)):
self.reset_timeout()
self.term = term
return True, self.term
else:
return False, self.term
# ------------------------------
# START PRESIDENT
def startHeartBeat(self):
print("Starting HEARTBEAT")
if self.staged:
# we have something staged at the beginning of our leadership
# we consider it a new payload just received and spread it around
self.handle_put(self.staged)
for each in self.fellow:
t = threading.Thread(target=self.send_heartbeat, args=(each, ))
t.start()
def update_follower_commitIdx(self, follower):
route = "heartbeat"
first_message = {"term": self.term, "addr": self.addr}
second_message = {
"term": self.term,
"addr": self.addr,
"action": "commit",
"payload": self.log[-1]
}
reply = utils.send(follower, route, first_message)
if reply and reply.json()["commitIdx"] < self.commitIdx:
# they are behind one commit, send follower the commit:
reply = utils.send(follower, route, second_message)
def send_heartbeat(self, follower):
# check if the new follower has the same commit index; if not, tell it to catch up to our log
if self.log:
self.update_follower_commitIdx(follower)
route = "heartbeat"
message = {"term": self.term, "addr": self.addr}
while self.status == LEADER:
start = time.time()
reply = utils.send(follower, route, message)
if reply:
self.heartbeat_reply_handler(reply.json()["term"],
reply.json()["commitIdx"])
delta = time.time() - start
# keep the heartbeat constant even if the network speed is varying
time.sleep((cfg.HB_TIME - delta) / 1000)
# we may step down when get replied
def heartbeat_reply_handler(self, term, commitIdx):
# i thought i was leader, but a follower told me
# that there is a new term, so i now step down
if term > self.term:
self.term = term
self.status = FOLLOWER
self.init_timeout()
# TODO logging replies
# ------------------------------
# FOLLOWER STUFF
def reset_timeout(self):
self.election_time = time.time() + utils.random_timeout()
# /heartbeat
def heartbeat_follower(self, msg):
# edge case: two nodes can believe they are LEADER in the same term;
# each will receive the other's heartbeat
# and both will step down
term = msg["term"]
if self.term <= term:
self.leader = msg["addr"]
self.reset_timeout()
# in case I am not follower
# or started an election and lost it
if self.status == CANDIDATE:
self.status = FOLLOWER
elif self.status == LEADER:
self.status = FOLLOWER
self.init_timeout()
# i have missed a few messages
if self.term < term:
self.term = term
# handle client request
if "action" in msg:
print("received action", msg)
action = msg["action"]
# logging after first msg
if action == "log":
payload = msg["payload"]
self.staged = payload
# proceeding staged transaction
elif self.commitIdx <= msg["commitIdx"]:
if not self.staged:
self.staged = msg["payload"]
self.commit()
return self.term, self.commitIdx
# initiate timeout thread, or reset it
def init_timeout(self):
self.reset_timeout()
# safety guarantee, timeout thread may expire after election
if self.timeout_thread and self.timeout_thread.is_alive():
return
self.timeout_thread = threading.Thread(target=self.timeout_loop)
self.timeout_thread.start()
# the timeout function
def timeout_loop(self):
# only stop timeout thread when winning the election
while self.status != LEADER:
delta = self.election_time - time.time()
if delta < 0:
self.startElection()
else:
time.sleep(delta)
def handle_get(self, payload):
print(payload)
key = payload["key"]
if key in self.DB:
payload["value"] = self.DB[key]
return payload
else:
return None
# takes a message and an array of confirmations and spreads it to the followers
# if it is a commit, it releases the lock
def spread_update(self, message, confirmations=None, lock=None):
for i, each in enumerate(self.fellow):
r = utils.send(each, "heartbeat", message)
if r and confirmations:
# print(f" - - {message['action']} by {each}")
confirmations[i] = True
if lock:
lock.release()
def handle_put(self, payload):
print(payload)
# lock to only handle one request at a time
self.lock.acquire()
self.staged = payload
waited = 0
log_message = {
"term": self.term,
"addr": self.addr,
"payload": payload,
"action": "log",
"commitIdx": self.commitIdx
}
# spread log to everyone
log_confirmations = [False] * len(self.fellow)
threading.Thread(target=self.spread_update,
args=(log_message, log_confirmations)).start()
while sum(log_confirmations) + 1 < self.majority:
waited += 0.0005
time.sleep(0.0005)
if waited > cfg.MAX_LOG_WAIT / 1000:
print(f"waited {cfg.MAX_LOG_WAIT} ms, update rejected:")
self.lock.release()
return False
# reach this point only if a majority has replied and tell everyone to commit
commit_message = {
"term": self.term,
"addr": self.addr,
"payload": payload,
"action": "commit",
"commitIdx": self.commitIdx
}
self.commit()
threading.Thread(target=self.spread_update,
args=(commit_message, None, self.lock)).start()
print("majority reached, replied to client, sending message to commit")
return True
def commit(self):
self.commitIdx += 1
self.log.append(self.staged)
if self.staged["op"] == "put":
key = self.staged["key"]
value = self.staged["value"]
self.DB[key] = value
else:
key = self.staged["key"]
if self.DB.get(key):
self.DB.pop(key)
# empty the staged so we can vote accordingly if there is a tie
self.staged = None
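# A minimal construction sketch (hypothetical addresses; utils.send and
# cfg.HB_TIME are provided elsewhere in this project):
#
#   fellows = ["10.0.0.2:5000", "10.0.0.3:5000"]
#   node = Node(fellow=fellows, my_ip="10.0.0.1:5000")
#   # an HTTP layer is assumed to route /vote_req and /heartbeat requests to
#   # decide_vote / heartbeat_follower, and client traffic to handle_get / handle_put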
|
interface.py | from Tkinter import *
from subprocess import Popen
from tkFileDialog import askopenfilename
import threading
import thread
import Image, ImageTk
import time
import sys
import signal
import os
import kirk
import scapy.all as sca
import scapy_ex
import channel_hop
import pickle
import numpy as n
fingerprint = {}
radiotap_formats = {"TSFT":"Q", "Flags":"B", "Rate":"B",
"Channel":"HH", "FHSS":"BB", "dBm_AntSignal":"b", "dBm_AntNoise":"b",
"Lock_Quality":"H", "TX_Attenuation":"H", "dB_TX_Attenuation":"H",
"dBm_TX_Power":"b", "Antenna":"B", "dB_AntSignal":"B",
"dB_AntNoise":"B", "b14":"H", "b15":"B", "b16":"B", "b17":"B", "b18":"B",
"b19":"BBB", "b20":"LHBB", "b21":"HBBBBBH", "b22":"B", "b23":"B",
"b24":"B", "b25":"B", "b26":"B", "b27":"B", "b28":"B", "b29":"B",
"b30":"B", "Ext":"B"}
def parsePacket(pkt):
if pkt.haslayer(sca.Dot11):
if pkt.addr2 is not None:
return pkt.addr2, pkt.dBm_AntSignal
return None, None
def piconnect(x, y):
f = open('data/' + str(x) + '_' + str(y), 'w+')
p = Popen("sshpass -p raspberry ssh pi@192.168.43.170 'cd ~/Desktop/RSSI; ./rssi_out.sh;'", stdout=f, shell=True, preexec_fn=os.setsid)#, stdout=stdout, stderr=f)
#Popen(['cd', '~/Desktop/RSSI'])
#p = Popen(['./rssi_log.sh'], stdout=f)
time.sleep(15)
os.killpg(p.pid, signal.SIGKILL)
f.close()
def record(x, y, iface):
now = time.time()
rssi={}
future = now + 10
while time.time() < future:
packets = sca.sniff(iface=iface, timeout = 10)
for pkt in packets:
mac, strength = parsePacket(pkt)
if mac is not None and strength is not None and strength < 0:
if mac in rssi:
rssi[mac][x][y].append(strength)
else:
if mac != "48:5a:3f:45:21:0f": #Filter out my cellphone
arr = [[[] for _ in range(kirk.x)] for _ in range(kirk.y)]
rssi.update({mac:arr})
rssi[mac][x][y].append(strength)
#Now that we have the data, calculate averages for each location
for mac in rssi:
if mac in fingerprint:
avg = fingerprint[mac]
else:
avg = [[None for _ in range(kirk.x)] for _ in range(kirk.y)]
for x in range(len(rssi[mac])):
for y in range(len(rssi[mac][x])):
l = rssi[mac][x][y]
if len(l) > 0:
avg[x][y] = n.mean(l)
#avg[x][y] = trimmean(l, 80)
fingerprint.update({mac:avg})
print fingerprint
finger_file = open(r'fingerprint.pkl', 'wb')
pickle.dump(fingerprint, finger_file)
finger_file.close()
#function to be called when mouse is clicked
def printcoords(event):
#outputting x and y coords to console
print (event.x,event.y)
record(event.x//box_size, event.y//box_size, iface)
print "DONE"
if __name__ == "__main__":
root = Tk()
# Start channel hopping
iface = channel_hop.get_mon_iface()
hop = threading.Thread(target=channel_hop.channel_hop, args=[iface])
hop.daemon = True
hop.start()
if(os.path.isfile('./fingerprint.pkl')):
fingerprint_file = open(r'fingerprint.pkl', 'rb')
fingerprint = pickle.load(fingerprint_file)
fingerprint_file.close()
width = kirk.width
height = kirk.height
box_size = kirk.box_size
img = ImageTk.PhotoImage(Image.open(kirk.File))
w = Canvas(root, width=width, height=height)
w.pack()
w.create_image(0,0,image=img,anchor="nw")
for x in range(1, width//box_size):
w.create_line(box_size*x, 0, box_size*x, height)
for y in range(1, height//box_size):
w.create_line(0, box_size*y, width, box_size*y)
#mouseclick event
w.bind("<Button 1>",printcoords)
root.mainloop()
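#NOTE: sniffing raw 802.11 frames requires root privileges and a wireless
#interface in monitor mode; channel_hop.get_mon_iface() is expected to
#return such an interface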
|
LearnGUI_V2.py | from tkinter import *
from tkinter import messagebox, filedialog
from Modules import PublicModules as libs
from Modules import LSTM_Config as cf
import cv2
import os
import sys
import threading
from PIL import Image
from PIL import ImageTk
from Modules.MyComponents import *
from tkcalendar import Calendar
from Modules.MyThreading import MyThreadingVideo
from threading import Thread
from time import sleep
from datetime import datetime
WINDOWS_WIDTH = int(1280 * 0.6)
WINDOWS_HEIGHT = int(720 * 0.6)
URL_VIDEO = 'FileInput/006.avi'
IS_USING_WEBCAM = False
CURSOR_DF = 'hand2'
CURSOR_NO = 'spider'
class EntryWithPlaceholder(Entry):
def __init__(self, master=None, placeholder="PLACEHOLDER", color='grey'):
super().__init__(master)
self.placeholder = placeholder
self.placeholder_color = color
self.default_fg_color = self['fg']
self.bind("<FocusIn>", self.foc_in)
self.bind("<FocusOut>", self.foc_out)
self.put_placeholder()
def put_placeholder(self):
self.insert(0, self.placeholder)
self['fg'] = self.placeholder_color
def foc_in(self, *args):
if self['fg'] == self.placeholder_color:
self.delete('0', 'end')
self['fg'] = self.default_fg_color
def foc_out(self, *args):
if not self.get():
self.put_placeholder()
class ChoseSourceWindow:
def __init__(self, master):
self.isUsingIpWebcam = IntVar()
self.master = master
self.master.minsize(500, 100)
self.frame = Frame(self.master)
# self.master.grab_set()
libs.fun_makeCenter(self.master)
self.DIALOG_OK = False
self.RETURN_RESULT = 'NULL'
self.iconCheck= PhotoImage(file='FileInput/Icons/ic_check2.png').subsample(3, 3)
self.iconMp4 = PhotoImage(file='FileInput/Icons/ic_check2.png').subsample(3, 3)
# call this last
self.fun_initComponent()
def fun_initComponent(self):
self.frame.grid(row=0, column=0, sticky='nsew')
self.master.grid_columnconfigure(0, weight=1)
self.master.grid_rowconfigure(0, weight=1)
# frame 1
self.frame1 = Frame(self.frame, bg='#95deff', padx=10, pady=10)
self.frame2 = Frame(self.frame, bg='#c1ffe5', padx=10, pady=10)
self.frame3 = Frame(self.frame, bg='#f7b5c7', padx=10, pady=10)
self.frame1.grid(row=0, column=0, sticky='nsew')
self.frame2.grid(row=1, column=0, sticky='nsew')
self.frame3.grid(row=2, column=0, sticky='nsew')
self.frame.grid_columnconfigure(0, weight=1)
self.frame.grid_rowconfigure(0, weight=1)
self.frame.grid_rowconfigure(1, weight=1)
self.frame.grid_rowconfigure(2, weight=1)
self.checkDir = Checkbutton(self.frame1, text='VIDEO FROM DISK...',
variable=self.isUsingIpWebcam, command=self.fun_CheckIsUsingCamChange,
padx=10, pady=10,
font=('Helvetica', 18, 'bold'),
cursor=CURSOR_DF
)
self.checkDir.grid(row=0, column=0, sticky='nsew')
self.frame1.grid_rowconfigure(0, weight=1)
self.frame1.grid_columnconfigure(0, weight=1)
self.tbSource = EntryWithPlaceholder(self.frame2, 'IP WEBCAM EXAMPLE: 192.168.1.1')
self.tbSource.grid(row=0, column=0, sticky='nsew')
self.btnSource = Button(self.frame2,
command=self.btnGetPathFromSourceClicked, cursor=CURSOR_DF,
image= self.iconCheck,
compound= CENTER,
bg='#c1ffe5'
)
self.btnSource.grid(row=0, column=1, sticky='nsew')
self.frame2.grid_columnconfigure(0, weight=9)
self.frame2.grid_columnconfigure(1, weight=1)
self.frame2.grid_rowconfigure(0, weight=1)
self.btnOk = Button(self.frame3, padx=10, pady=10, text='Load Video Clip'
, command=self.btnLoadVideoClicked,
state='disable',
cursor=CURSOR_NO
)
self.btnOk.grid(row=0, column=0, sticky='nsew')
self.frame3.grid_columnconfigure(0, weight=1)
self.frame3.grid_rowconfigure(0, weight=1)
# ----------- camera feature removed -------------
self.isUsingIpWebcam.set(1)
self.fun_CheckIsUsingCamChange()
self.checkDir.config(state='disable')
# ------------------------------------------------
def fun_CheckIsUsingCamChange(self):
if self.isUsingIpWebcam.get() == 0:
self.btnSource.config(image= self.iconCheck)
holder = 'IP WEBCAM EXAMPLE: 192.168.1.1'
self.checkDir.config(bg= 'white')
else:
self.btnSource.config(image= self.iconMp4)
holder = 'EXAMPLE: C:/VIDEO/DETECTION.MP4'
self.checkDir.config(bg= '#c1ffe5')
self.fun_reloadHolderSource(source=holder)
def fun_reloadHolderSource(self, source: str):
self.tbSource.delete('0', 'end')
self.tbSource.placeholder = source
self.tbSource.put_placeholder()
def fun_checkVideoFromSource(self, source: str):
try:
frames = libs.fun_getFramesOfVideo(path=source, count=20)
messagebox.showinfo('Notification!', 'Check Video Load OK, Video Size: {0}'.format(frames[0].shape))
return True
except:
messagebox.showerror('Error!', 'Request is not accepted')
return False
def fun_getURL_IPCam(self, ip: str):
return '{0}{1}{2}'.format('http://', ip, ':8080/video')
def btnLoadVideoClicked(self):
if self.isUsingIpWebcam.get() == 0:
self.RETURN_RESULT = self.fun_getURL_IPCam(ip=self.tbSource.get())
self.DIALOG_OK = True
self.master.destroy()
def btnGetPathFromSourceClicked(self):
if self.isUsingIpWebcam.get() == 0:
url = self.fun_getURL_IPCam(ip=self.tbSource.get())
else:
self.RETURN_RESULT = filedialog.askopenfilename(initialdir="FileInput/Test/", title="Select file",
filetypes=(("AVI files", "*.AVI"), ("MP4 files", "*.MP4"), ("ALL files", "*.*")))
self.fun_reloadHolderSource(source=self.RETURN_RESULT)
url = self.RETURN_RESULT
isCheck = self.fun_checkVideoFromSource(source=url)
if isCheck:
self.btnOk.config(state='normal', cursor=CURSOR_DF)
else:
self.btnOk.config(state='disable', cursor=CURSOR_NO)
def close_windows(self):
self.master.destroy()
class MyApp:
def __init__(self, title: str = "GUI HUMAN'S VIOLENCE DETECTIONS"):
self.URL_VIDEO = URL_VIDEO
self.videoCap = None
self.title = title
self.root = Tk()
self.root.title(string=title)
self.stopEvent = None
self.IS_PAUSE = False
self.isChoiseTimeDown = IntVar()
self.valHourDown = StringVar()
self.valMinuteDown = StringVar()
self.containerTrai = None
self.containerPhai = None
self.root.minsize(width=WINDOWS_WIDTH, height=WINDOWS_HEIGHT)
# libs.fun_makeCenter(self.root)
libs.fun_makeMaximumSize(self.root)
# Load model VGG16
self.vgg16_model = cf.fun_getVGG16Model()
self.vgg16_model.summary()
# Load model LSTM
try:
self.lstm_model = cf.fun_loadModelLSTM()
self.lstm_model.summary()
except:
print('# '*20)
libs.fun_print(name='Load Model LSTM', value='File Not Found!')
return
self.initComponent()
def fun_updateTimeDown(self,):
while self.isChoiseTimeDown.get():
_, current = libs.fun_getCurrentTime()
isContinue, self.down = libs.fun_dayMinus(dayFrom= current, dayTo= self.dayTo)
try:
if not isContinue:
self.isChoiseTimeDown.set(0)
self.cbThoiGianTat.config(bg= 'white')
self.lbThoiGianDen.destroy()
self.lbThoiGianConLai.destroy()
self.fun_ngatKetNoi()
self.fun_ngatKetNoi()
return
self.lbThoiGianConLai.config(text= 'Time remaining: {0} s'.format(self.down))
except:
print('Countdown update thread stopped but did not work correctly: ' + threading.current_thread().name)
sleep(1)
def fun_hienThoiGianDen_Con(self, timeDown: str, hourDown:int, minuteDown:int):
self.lbThoiGianConLai = Label(self.containerTongHopMoTaPhanDoanDanh,
text= 'Time remaining: {0} s'.format(self.down),
padx= 10, pady= 10,
font=('Helvetica', 18, 'bold'),
anchor= 'w'
)
self.lbThoiGianDen = Label(self.containerTongHopMoTaPhanDoanDanh,
text= 'Shuts off at: {0} {1}h : {2}m'.format(timeDown, hourDown, minuteDown),
padx= 10, pady= 10,
font=('Helvetica', 18, 'bold'),
anchor= 'w'
)
self.lbThoiGianDen.grid(row= 1, column= 0, sticky= 'nsew')
self.lbThoiGianConLai.grid(row= 2, column= 0, sticky= 'nsew')
self.containerTongHopMoTaPhanDoanDanh.grid_rowconfigure(1, weight= 1)
self.containerTongHopMoTaPhanDoanDanh.grid_rowconfigure(2, weight= 1)
def fun_chonThoiGianDen(self, ):
isChoise = self.isChoiseTimeDown.get()
self.TIME_DOWN = None
# 1 => enabled | 0 => disabled
if not isChoise:
self.lbThoiGianDen.destroy()
self.lbThoiGianConLai.destroy()
self.cbThoiGianTat.config(bg= 'white')
return
self.cbThoiGianTat.config(bg= '#95deff')
# show the time selection form
top = Toplevel(self.root)
cal = Calendar(top,
font="Arial 14", selectmode='day',
cursor="hand1",)
cal.selection_set(datetime.now())
cal.pack(fill="both", expand=True)
self.tbHour = EntryWithPlaceholder(top, placeholder= 'HOURS SELECTED')
self.tbHour.pack(fill= 'both', expand= True, padx= 10, pady= 10)
self.tbMinute = EntryWithPlaceholder(top, placeholder= 'MINUTE SELECTED')
self.tbMinute.pack(fill= 'both', expand= True, padx= 10, pady= 10)
Button(top, text="APPLY NOW", command= self.fun_btnOkChoiDataTimeDownClicked).pack(fill= 'both', expand= True, padx= 10, pady= 10)
self.cal = cal
self.ChoiseTimeDownWindow = top
top.grab_set()
self.root.wait_window(top)
if self.TIME_DOWN is None:
messagebox.showwarning('Notification!', 'Failed to set the automatic shutdown time')
self.isChoiseTimeDown.set(0)
self.cbThoiGianTat.config(bg= 'white')
return
# validate the selected time here
_, curent = libs.fun_getCurrentTime()
ymd = str(self.TIME_DOWN).split('-')
dayTo = '{0}_{1}_{2}_{3}_{4}_{5}'.format(ymd[0], ymd[1], ymd[2], self.hourDown, self.minuteDown, 0)
isCheck, down = libs.fun_dayMinus(dayFrom= curent, dayTo= dayTo)
if not isCheck:
messagebox.showerror('Notification!', 'The automatic shutdown time is invalid')
self.isChoiseTimeDown.set(0)
self.cbThoiGianTat.config(bg= 'white')
return
self.dayTo = dayTo
self.down = down
self.fun_hienThoiGianDen_Con(timeDown= self.TIME_DOWN, hourDown= self.hourDown, minuteDown= self.minuteDown)
# start a new thread to update the countdown
self.threadUpdateTime = Thread(target= self.fun_updateTimeDown)
self.threadUpdateTime.daemon = True
self.threadUpdateTime.start()
def fun_btnOkChoiDataTimeDownClicked(self, ):
self.TIME_DOWN = self.cal.selection_get()
hour = 0
minu = 0
try:
hour = int(self.tbHour.get())
minu = int(self.tbMinute.get())
except:
messagebox.showwarning('Error!', 'Error while parsing hour and minute -> using default [ {0} : {1} ]'.format(hour, minu))
self.hourDown = hour
self.minuteDown = minu
self.ChoiseTimeDownWindow.destroy()
def initComponent(self):
#
self.containerTrai = Frame(self.root, bg='white', padx=10, pady=10)
self.containerPhai = Frame(self.root, bg='white', padx=10, pady=10)
self.containerTrai.grid(row=0, column=0, sticky='nsew')
self.containerPhai.grid(row=0, column=1, sticky='nsew')
self.root.grid_columnconfigure(0, weight=8)
self.root.grid_columnconfigure(1, weight=2)
self.root.grid_rowconfigure(0, weight=1)
# child containers of the left pane
self.containerChonNguonDuLieu = Frame(self.containerTrai, bg='#95deff', padx=10, pady=10)
self.containerVideoCamera = Frame(self.containerTrai, bg='#c1ffe5', padx=10, pady=10)
self.containerChucNang = Frame(self.containerTrai, bg='#f7b5c7', padx=10, pady=10)
self.containerChonNguonDuLieu.grid(row=0, column=0, sticky='nsew')
self.containerVideoCamera.grid(row=1, column=0, sticky='nsew')
self.containerChucNang.grid(row=2, column=0, sticky='nsew')
self.containerTrai.grid_columnconfigure(0, weight=1)
self.containerTrai.grid_rowconfigure(0, weight=1)
self.containerTrai.grid_rowconfigure(1, weight=8)
self.containerTrai.grid_rowconfigure(2, weight=1)
# UI for the data source selection button
iconChonNguonDuLieu = PhotoImage(file='FileInput/Icons/ic_dir.png')
# Resizing image to fit on button
iconChonNguonDuLieu = iconChonNguonDuLieu.subsample(1, 1)
self.btnChonNguonDuLieu = Button(self.containerChonNguonDuLieu, padx=10,
pady=10, text='INSERT VIDEO FROM SOURCE...',
command=self.fun_chonNguonDuLieu,
# bg='green',
cursor=CURSOR_DF,
font=('Helvetica', 18, 'bold'),
image=iconChonNguonDuLieu,
compound=LEFT
)
self.btnChonNguonDuLieu.image=iconChonNguonDuLieu
self.btnChonNguonDuLieu.grid(row=0, column=0, sticky='nsew')
# UI for the reload video button
iconTaiLaiVideo = PhotoImage(file='FileInput/Icons/ic_process.png')
# Resizing image to fit on button
iconTaiLaiVideo = iconTaiLaiVideo.subsample(1, 1)
self.btnRefresh = Button(self.containerChonNguonDuLieu, padx=10,
pady=10,
# bg='green',
# text='Reload video',
command=self.fun_taiLaiVideo,
state='disable',
cursor=CURSOR_NO,
image=iconTaiLaiVideo,
compound=CENTER
)
self.btnRefresh.image= iconTaiLaiVideo
self.btnRefresh.grid(row=0, column=1, sticky='nsew')
# UI for the disconnect button
iconNgatKetNoi = PhotoImage(file='FileInput/Icons/ic_powerof.png')
iconNgatKetNoi = iconNgatKetNoi.subsample(1, 1)
self.btnDisconection = Button(self.containerChonNguonDuLieu, padx=10,
pady=10,
# bg='green',
# text='Disconnect',
image=iconNgatKetNoi,
command=self.fun_ngatKetNoi,
cursor=CURSOR_DF,
compound=CENTER
)
self.btnDisconection.image = iconNgatKetNoi  # keep a reference so the image is not garbage-collected
self.btnDisconection.grid(row=0, column=2, sticky='nsew')
self.containerChonNguonDuLieu.grid_columnconfigure(0, weight=8)
self.containerChonNguonDuLieu.grid_columnconfigure(1, weight=1)
self.containerChonNguonDuLieu.grid_columnconfigure(2, weight=1)
self.containerChonNguonDuLieu.grid_rowconfigure(0, weight=1)
# child containers of the right pane
self.containerPhanDoanBaoLuc = Frame(self.containerPhai, bg='#95deff', padx=10, pady=10)
self.containerTongHopMoTaPhanDoanDanh = Frame(self.containerPhai, bg='#c1ffe5', padx=10, pady=10)
self.containerPhanDoanBaoLuc.grid(row=0, column=0, sticky='nsew')
self.containerTongHopMoTaPhanDoanDanh.grid(row=1, column=0, sticky='nsew')
# child components of containerTongHopMoTaPhanDoanDanh
self.cbThoiGianTat = Checkbutton(self.containerTongHopMoTaPhanDoanDanh,
text= 'TIME SELECTION AUTO OFF', padx= 10, pady= 10,
font=('Helvetica', 18, 'bold'),
variable= self.isChoiseTimeDown,
command= self.fun_chonThoiGianDen,
)
self.cbThoiGianTat.grid(row= 0, column= 0, sticky= 'nsew')
self.containerTongHopMoTaPhanDoanDanh.grid_rowconfigure(0, weight= 1)
self.containerTongHopMoTaPhanDoanDanh.grid_columnconfigure(0, weight= 1)
# label showing which type of violence was detected
self.lbKetQuaBaoLuc = Label(self.containerChucNang,
text='VIOLENCE TYPE IS DISPLAYED HERE', padx=10,
pady=10,
bg='white',
font=('Helvetica', 18, 'bold')
)
self.lbKetQuaBaoLuc.grid(row=0, column=0, sticky='nsew')
self.containerChucNang.grid_rowconfigure(0, weight=1)
self.containerChucNang.grid_columnconfigure(0, weight=1)
self.containerPhai.grid_rowconfigure(0, weight=9)
self.containerPhai.grid_rowconfigure(1, weight=1)
self.containerPhai.grid_columnconfigure(0, weight=1)
# child components of containerVideoCamera
self.lbVideoFrames = Label(self.containerVideoCamera, bg='white', padx=10, pady=10)
self.lbVideoFrames.grid(row=0, column=0, sticky='nsew')
self.containerVideoCamera.grid_rowconfigure(0, weight=1)
self.containerVideoCamera.grid_columnconfigure(0, weight=1)
self.makePhanDoanBaoLucGUI6()
# self.videoLoadingThreading()
self.root.wm_protocol('WM_DELETE_WINDOW', self.onClose)
self.fun_initGUI()
self.fun_taiGiaoDien17CapDo()
def fun_initGUI(self):
img = cv2.imread(filename= 'FileInput/Imgs/ImgNotFound2.jpg')
img1 = cv2.imread(filename= 'FileInput/Imgs/ImgNotFound.jpg')
size = libs.fun_getSizeOfFrame(frame= img)
self.imgNotFound = libs.fun_cv2_imageArrayToImage(containerFather= self.containerVideoCamera, frame= img, reSize= size)
self.imgNotFound1 = libs.fun_cv2_imageArrayToImage(containerFather= self.containerVideoCamera, frame= img1, reSize= (int(size[0] * 0.2), int(size[1] * 0.2)))
self.lbVideoFrames.config(image= self.imgNotFound)
self.lbVideoFrames1.config(image= self.imgNotFound1)
self.lbVideoFrames2.config(image= self.imgNotFound1)
self.lbVideoFrames3.config(image= self.imgNotFound1)
self.lbVideoFrames4.config(image= self.imgNotFound1)
def fun_ngatKetNoi(self):
if self.stopEvent is None:
return
self.stopEvent.set()
self.fun_initGUI()
def fun_taiLaiVideo(self):
self.btnRefresh.config(state='disable', cursor=CURSOR_NO)
self.videoLoadingThreading()
def fun_taiGiaoDien17CapDo(self):
# UI for the 17-level container
pass # Nothing to do
# event handler for the data source selection button
def fun_chonNguonDuLieu(self):
self.newWindow = Toplevel(self.root)
self.app = ChoseSourceWindow(self.newWindow)
self.app.master.grab_set()
self.root.wait_window(self.app.master)
# the user did not confirm the action -> stop
if not self.app.DIALOG_OK:
messagebox.showwarning('Notification!', 'Failed video source selection')
return
# the action was confirmed by the user
self.URL_VIDEO = self.app.RETURN_RESULT
self.fun_taiGiaoDien17CapDo()
# warm up the GPU before loading the video and viewing results
self.fun_kichHoatGPU_Threading()
# load the video on a thread and view the results
# self.videoLoadingThreading()
def fun_kichHoatGPU(self,):
# initialize the activation flag
self.ACTIVE_OK = False
# show by threading
t_loading = threading.Thread(target= self.fun_showLoadingCMD, args=())
t_loading.daemon = True
t_loading.start()
# warm up the GPU
MAX_COUNT_ACTIVE = 5
for _ in range(MAX_COUNT_ACTIVE):
_20F = libs.fun_getFramesOfVideo(self.URL_VIDEO, count= 20)
transfer = cf.fun_getTransferValue_EDIT(pathVideoOrListFrame=_20F, modelVGG16=self.vgg16_model)
libs.fun_predict(modelLSTM= self.lstm_model, transferValue= transfer, isPrint= True)
# OK
self.ACTIVE_OK = True
def fun_showLoadingCMD(self,):
while not self.ACTIVE_OK:
os.system("start /wait cmd /c LoadingCMD.py")
self.videoLoadingThreading()
def fun_kichHoatGPU_Threading(self,):
t_kichHoatGPU = threading.Thread(target=self.fun_kichHoatGPU, args=())
t_kichHoatGPU.daemon = True
t_kichHoatGPU.start()
def makePhanDoanBaoLucGUI6(self):
self.treeAction = TreeActionDetection(containerFather= self.containerPhanDoanBaoLuc)
self.frameVideo1 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='white')
self.frameVideo2 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='#c1ffe5')
self.frameVideo3 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='#c1ffe5')
self.frameVideo4 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='white')
'''
These frames are only laid out for LearnGUI; LearnGUI_V2 does not use them, so they are disabled.
Doing it this way avoids touching any other classes
while still working on both GUI and GUI V2.
'''
# self.frameVideo1.grid(row=0, column=0, sticky='nsew')
# self.frameVideo2.grid(row=0, column=1, sticky='nsew')
# self.frameVideo3.grid(row=1, column=0, sticky='nsew')
# self.frameVideo4.grid(row=1, column=1, sticky='nsew')
self.containerPhanDoanBaoLuc.grid_rowconfigure(0, weight=1)
# self.containerPhanDoanBaoLuc.grid_rowconfigure(1, weight=1)
self.containerPhanDoanBaoLuc.grid_columnconfigure(0, weight=1)
# self.containerPhanDoanBaoLuc.grid_columnconfigure(1, weight=1)
# segment 1
self.lbVideoFrames1 = Label(self.frameVideo1, padx=10, pady=10, bg='white')
self.lbVideoFrames1.grid(row=0, column=0, sticky='nsew')
self.frameVideo1.grid_rowconfigure(0, weight=1)
self.frameVideo1.grid_columnconfigure(0, weight=1)
# segment 2
self.lbVideoFrames2 = Label(self.frameVideo2, padx=10, pady=10, bg='white')
self.lbVideoFrames2.grid(row=0, column=0, sticky='nsew')
self.frameVideo2.grid_rowconfigure(0, weight=1)
self.frameVideo2.grid_columnconfigure(0, weight=1)
# segment 3
self.lbVideoFrames3 = Label(self.frameVideo3, padx=10, pady=10, bg='white')
self.lbVideoFrames3.grid(row=0, column=0, sticky='nsew')
self.frameVideo3.grid_rowconfigure(0, weight=1)
self.frameVideo3.grid_columnconfigure(0, weight=1)
# segment 4
self.lbVideoFrames4 = Label(self.frameVideo4, padx=10, pady=10, bg='white')
self.lbVideoFrames4.grid(row=0, column=0, sticky='nsew')
self.frameVideo4.grid_rowconfigure(0, weight=1)
self.frameVideo4.grid_columnconfigure(0, weight=1)
self.arrThread = []
thread1 = MyThreadingVideo(lbShow=None, lbFather=self.frameVideo1, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction= self.treeAction)
thread2 = MyThreadingVideo(lbShow=None, lbFather=self.frameVideo2, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction= self.treeAction)
thread3 = MyThreadingVideo(lbShow=None, lbFather=self.frameVideo3, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction= self.treeAction)
thread4 = MyThreadingVideo(lbShow=None, lbFather=self.frameVideo4, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction= self.treeAction)
self.arrThread.append(thread1)
self.arrThread.append(thread3)
self.arrThread.append(thread4)
self.arrThread.append(thread2)
def runMyApp(self):
self.root.mainloop()
def videoLoadingThreading(self):
self.stopEvent = threading.Event()
self.loadVideoThread = threading.Thread(target=self.updateVideoFrames, args=())
self.loadVideoThread.daemon = True
self.loadVideoThread.start()
def updateVideoFrames(self):
self.videoCap = cv2.VideoCapture(self.URL_VIDEO)
self.isContinue, self.frame = self.videoCap.read()
count = 0
xoayVong = 0
frames = []
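# frames are collected in batches of 20; each full batch is handed to one of the
# four MyThreadingVideo workers in round-robin order (xoayVong is the rotation index)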
while not self.stopEvent.is_set() and self.isContinue:
image = libs.fun_cv2_imageArrayToImage(containerFather= self.containerVideoCamera, frame= self.frame.copy())
self.lbVideoFrames.config(image=image)
self.lbVideoFrames.image = image
isContinue, self.frame = self.videoCap.read()
# read failed means the video has ended -> break out
if not isContinue:
break
frames.append(self.frame.copy())
cv2.waitKey(7)
if count == 19:
self.arrThread[xoayVong].setFrames(frames)
self.arrThread[xoayVong].startShowVideo()
xoayVong += 1
if xoayVong == 4:
xoayVong = 0
frames = []
count = 0
continue
count += 1
self.btnRefresh.config(state='normal', cursor=CURSOR_DF)
if not self.IS_PAUSE:
self.videoCap.release()
def onClose(self):
libs.fun_print(name='Violence Detect App', value='Closing')
self.videoCap.release()
self.root.destroy()
sys.exit(0)
if __name__ == '__main__':
if IS_USING_WEBCAM:
URL_VIDEO = 0
videoCap = cv2.VideoCapture(URL_VIDEO)
app = MyApp()
app.runMyApp()
|
dataset_generator.py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate D4RL TFRecord dataset that is compatible with TF-Agents."""
# Lint as: python3
import functools
import os
from absl import app
from absl import flags
from absl import logging
import d4rl # pylint: disable=unused-import
import gym
from tf_agents.examples.cql_sac.kumar20.dataset import dataset_utils
from tf_agents.examples.cql_sac.kumar20.dataset import file_utils
from tf_agents.system import system_multiprocessing as multiprocessing
# Using XM. # pylint: disable=unused-import
flags.DEFINE_string('root_dir', '/tmp/dataset/', 'Output dataset directory.')
flags.DEFINE_string(
'env_name', 'hopper-medium-v0', 'Env name. '
'Should match one of keys in d4rl.infos.DATASET_URLS')
flags.DEFINE_integer('replicas', None,
'Number of parallel replicas generating evaluations.')
flags.DEFINE_integer(
'replica_id', None,
'Replica id. If not None, only generate for this replica slice.')
flags.DEFINE_bool(
'use_trajectories', False,
'Whether to save samples as trajectories. If False, save as transitions.')
flags.DEFINE_bool(
'exclude_timeouts', False, 'Whether to exclude the final episode step '
'if it is from a timeout instead of a terminal.')
FLAGS = flags.FLAGS
def main(_):
logging.set_verbosity(logging.INFO)
d4rl_env = gym.make(FLAGS.env_name)
d4rl_dataset = d4rl_env.get_dataset()
root_dir = os.path.join(FLAGS.root_dir, FLAGS.env_name)
dataset_dict = dataset_utils.create_episode_dataset(
d4rl_dataset,
FLAGS.exclude_timeouts,
observation_dtype=d4rl_env.observation_space.dtype)
num_episodes = len(dataset_dict['episode_start_index'])
logging.info('Found %d episodes, %d total steps.', num_episodes,
len(dataset_dict['states']))
collect_data_spec = dataset_utils.create_collect_data_spec(
dataset_dict, use_trajectories=FLAGS.use_trajectories)
logging.info('Collect data spec %s', collect_data_spec)
num_replicas = FLAGS.replicas or 1
interval_size = num_episodes // num_replicas + 1
# If FLAGS.replica_id is set, only run that section of the dataset.
# This is useful if distributing the replicas on Borg.
if FLAGS.replica_id is not None:
file_name = '%s_%d.tfrecord' % (FLAGS.env_name, FLAGS.replica_id)
start_index = FLAGS.replica_id * interval_size
end_index = min((FLAGS.replica_id + 1) * interval_size, num_episodes)
file_utils.write_samples_to_tfrecord(
dataset_dict=dataset_dict,
collect_data_spec=collect_data_spec,
dataset_path=os.path.join(root_dir, file_name),
start_episode=start_index,
end_episode=end_index,
use_trajectories=FLAGS.use_trajectories)
else:
# Otherwise, parallelize with tf_agents.system.multiprocessing.
jobs = []
context = multiprocessing.get_context()
for i in range(num_replicas):
if num_replicas == 1:
file_name = '%s.tfrecord' % FLAGS.env_name
else:
file_name = '%s_%d.tfrecord' % (FLAGS.env_name, i)
dataset_path = os.path.join(root_dir, file_name)
start_index = i * interval_size
end_index = min((i + 1) * interval_size, num_episodes)
kwargs = dict(
dataset_dict=dataset_dict,
collect_data_spec=collect_data_spec,
dataset_path=dataset_path,
start_episode=start_index,
end_episode=end_index,
use_trajectories=FLAGS.use_trajectories)
job = context.Process(
target=file_utils.write_samples_to_tfrecord, kwargs=kwargs)
job.start()
jobs.append(job)
for job in jobs:
job.join()
if __name__ == '__main__':
multiprocessing.handle_main(functools.partial(app.run, main))
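# Example invocation (hypothetical paths; the flags are the ones defined above):
#
#   python dataset_generator.py --root_dir=/tmp/dataset \
#       --env_name=hopper-medium-v0 --replicas=4 --exclude_timeouts=true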
|
scheduler.py | import logging
import threading
import time
from datetime import datetime
class SimpleTaskScheduler:
""" Simple task scheduler
Schedules a task every chosen intervall (seconds)
NB: If the task does not finish at all, the first time, there will not be raised an exception
:raises Exception: Last task is not done when task is started
"""
def __init__(self, task, runIntervalInSeconds, delay, checkIntervalInSeconds):
"""Constructor
The task will run on whole time periods + delay.
:param task: the task to be done
:type task: method
:param runIntervalInSeconds: How often the task will be executed in seconds
:type runIntervalInSeconds: int
:param delay: Delay after whole time period, in seconds
:type delay: int
:param checkIntervalInSeconds: How often the program should check if it should start the task (possible delay before the task executes), in seconds
:type checkIntervalInSeconds: int
"""
self.__runIntervalInSeconds = runIntervalInSeconds
self.__delay = delay
self.__checkIntervalInSeconds = checkIntervalInSeconds
self.__runflag = True
self.__task = task
self.__starterThread = threading.Thread(target=self.__starterThreadMethod, args=())
def start(self):
"""Start the scheduler
"""
self.__starterThread.start()
def join(self):
"""Join the scheduler task - Wait for the the scheduler to finish
"""
if self.__starterThread.is_alive():
self.__starterThread.join()
def stop(self):
"""Stop the scheduler
"""
self.__runflag = False
def __starterThreadMethod(self):
"""Thread of the scheduler. Executes the task and insures that the interval is held.
:raises Exception: Raises exception if the task is taking longer time than the intervall of the task
"""
t = time.time()
self.__starttime = (
t - (t % self.__runIntervalInSeconds) + self.__runIntervalInSeconds + self.__delay
)
while self.__runflag:
if self.__starttime < time.time():
raise Exception("Last task was not done -> increase the interval")
while self.__starttime > time.time():
time.sleep(self.__checkIntervalInSeconds)
worker = threading.Thread(
target=self.__task, args=(datetime.fromtimestamp(self.__starttime),)
)
self.__starttime = self.__starttime + self.__runIntervalInSeconds
worker.start()
worker.join()
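# A minimal usage sketch (hypothetical task):
#
#   def report(scheduled_time):
#       print("tick at", scheduled_time)
#
#   scheduler = SimpleTaskScheduler(report, runIntervalInSeconds=60, delay=5,
#                                   checkIntervalInSeconds=1)
#   scheduler.start()  # report() fires 5 s after each whole minute
#   ...
#   scheduler.stop()
#   scheduler.join()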
|
grid_search.py | """
Gridsearch implementation
"""
import os
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import pydoop.hdfs
import threading
import six
import datetime
run_id = 0
def _grid_launch(sc, map_fun, args_dict, direction='max', local_logdir=False, name="no-name"):
"""
Run the wrapper function with each hyperparameter combination as specified by the dictionary
Args:
sc:
map_fun:
args_dict:
direction:
local_logdir:
name:
Returns:
"""
global run_id
app_id = str(sc.applicationId)
num_executions = 1
if direction != 'max' and direction != 'min':
raise ValueError('Invalid direction ' + direction + ', must be max or min')
arg_lists = list(args_dict.values())
currentLen = len(arg_lists[0])
for i in range(len(arg_lists)):
if currentLen != len(arg_lists[i]):
raise ValueError('Length of each function argument list must be equal')
num_executions = len(arg_lists[i])
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup("Grid Search", "{} | Hyperparameter Optimization".format(name))
#Force execution on executor, since GPU is located on executor
job_start = datetime.datetime.now()
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, args_dict, local_logdir))
job_end = datetime.datetime.now()
job_time_str = util._time_diff(job_start, job_end)
arg_count = six.get_function_code(map_fun).co_argcount
arg_names = six.get_function_code(map_fun).co_varnames
hdfs_appid_dir = hopshdfs._get_experiments_dir() + '/' + app_id
hdfs_runid_dir = _get_logdir(app_id)
max_val, max_hp, min_val, min_hp, avg = _get_best(args_dict, num_executions, arg_names, arg_count, hdfs_appid_dir, run_id)
param_combination = ""
best_val = ""
if direction == 'max':
param_combination = max_hp
best_val = str(max_val)
results = '\n------ Grid Search results ------ direction(' + direction + ') \n' \
'BEST combination ' + max_hp + ' -- metric ' + str(max_val) + '\n' \
'WORST combination ' + min_hp + ' -- metric ' + str(min_val) + '\n' \
'AVERAGE metric -- ' + str(avg) + '\n' \
'Total job time ' + job_time_str + '\n'
_write_result(hdfs_runid_dir, results)
print(results)
elif direction == 'min':
param_combination = min_hp
best_val = str(min_val)
results = '\n------ Grid Search results ------ direction(' + direction + ') \n' \
'BEST combination ' + min_hp + ' -- metric ' + str(min_val) + '\n' \
'WORST combination ' + max_hp + ' -- metric ' + str(max_val) + '\n' \
'AVERAGE metric -- ' + str(avg) + '\n' \
'Total job time ' + job_time_str + '\n'
_write_result(hdfs_runid_dir, results)
print(results)
print('Finished Experiment \n')
return hdfs_runid_dir, param_combination, best_val
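# Note: this is a zipped search, not a full cartesian product -- every list in
# args_dict must have the same length, and combination i is built from the i-th
# element of each list. Example (hypothetical hyperparameters):
#
#   args_dict = {'learning_rate': [0.1, 0.01, 0.001], 'dropout': [0.3, 0.5, 0.7]}
#   logdir, best_combination, best_metric = _grid_launch(sc, train_fn, args_dict, direction='max')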
def _get_logdir(app_id):
"""
Args:
app_id:
Returns:
"""
global run_id
return hopshdfs._get_experiments_dir() + '/' + app_id + '/grid_search/run.' + str(run_id)
def _write_result(runid_dir, string):
"""
Args:
runid_dir:
string:
Returns:
"""
metric_file = runid_dir + '/summary'
fs_handle = hopshdfs.get_fs()
# older and newer hops clients disagree on the open_file keyword (mode vs flags)
try:
fd = fs_handle.open_file(metric_file, mode='w')
except:
fd = fs_handle.open_file(metric_file, flags='w')
fd.write(string.encode())
fd.flush()
fd.close()
def _prepare_func(app_id, run_id, map_fun, args_dict, local_logdir):
"""
Args:
app_id:
run_id:
map_fun:
args_dict:
local_logdir:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
tb_hdfs_path = ''
hdfs_exec_logdir = ''
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
try:
#Arguments
if args_dict:
argcount = six.get_function_code(map_fun).co_argcount
names = six.get_function_code(map_fun).co_varnames
args = []
argIndex = 0
param_string = ''
while argcount > 0:
#Get args for executor and run function
param_name = names[argIndex]
param_val = args_dict[param_name][executor_num]
param_string += str(param_name) + '=' + str(param_val) + '.'
args.append(param_val)
argcount -= 1
argIndex += 1
param_string = param_string[:-1]
hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, param_string, 'grid_search')
pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
hopshdfs._init_logger()
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
hopshdfs.log(gpu_str)
print(gpu_str)
print('-------------------------------------------------------')
print('Started running task ' + param_string + '\n')
hopshdfs.log('Started running task ' + param_string)
task_start = datetime.datetime.now()
retval = map_fun(*args)
task_end = datetime.datetime.now()
_handle_return(retval, hdfs_exec_logdir)
time_str = 'Finished task ' + param_string + ' - took ' + util._time_diff(task_start, task_end)
print('\n' + time_str)
print('Returning metric ' + str(retval))
print('-------------------------------------------------------')
hopshdfs.log(time_str)
except:
#Always do cleanup
_cleanup(tb_hdfs_path)
if devices.get_num_gpus() > 0:
t.do_run = False
t.join()
raise
finally:
if local_logdir:
local_tb = tensorboard.local_logdir_path
util._store_local_tensorboard(local_tb, hdfs_exec_logdir)
_cleanup(tb_hdfs_path)
if devices.get_num_gpus() > 0:
t.do_run = False
t.join()
return _wrapper_fun
def _get_best(args_dict, num_combinations, arg_names, arg_count, hdfs_appid_dir, run_id):
"""
Args:
args_dict:
num_combinations:
arg_names:
arg_count:
hdfs_appid_dir:
run_id:
Returns:
"""
max_hp = ''
max_val = ''
min_hp = ''
min_val = ''
results = []
first = True
for i in range(num_combinations):
argIndex = 0
param_string = ''
num_args = arg_count
while num_args > 0:
#Get args for executor and run function
param_name = arg_names[argIndex]
param_val = args_dict[param_name][i]
param_string += str(param_name) + '=' + str(param_val) + '.'
num_args -= 1
argIndex += 1
param_string = param_string[:-1]
path_to_metric = hdfs_appid_dir + '/grid_search/run.' + str(run_id) + '/' + param_string + '/metric'
metric = None
with pydoop.hdfs.open(path_to_metric, "r") as fi:
metric = float(fi.read())
fi.close()
if first:
max_hp = param_string
max_val = metric
min_hp = param_string
min_val = metric
first = False
if metric > max_val:
max_val = metric
max_hp = param_string
if metric < min_val:
min_val = metric
min_hp = param_string
results.append(metric)
avg = sum(results)/float(len(results))
return max_val, max_hp, min_val, min_hp, avg
def _handle_return(val, hdfs_exec_logdir):
"""
Args:
val:
hdfs_exec_logdir:
Returns:
"""
try:
int(val)  # only validates that the returned metric is numeric
except (TypeError, ValueError):
raise ValueError('Your function needs to return a metric (number) which should be maximized or minimized')
metric_file = hdfs_exec_logdir + '/metric'
fs_handle = hopshdfs.get_fs()
try:
fd = fs_handle.open_file(metric_file, mode='w')
except:
fd = fs_handle.open_file(metric_file, flags='w')
fd.write(str(float(val)).encode())
fd.flush()
fd.close()
def _cleanup(tb_hdfs_path):
"""
Args:
tb_hdfs_path:
Returns:
"""
handle = hopshdfs.get()
if tb_hdfs_path and handle.exists(tb_hdfs_path):
handle.delete(tb_hdfs_path)
hopshdfs._kill_logger() |
bf_manager.py | from multiprocessing import Process
from bf8 import start, BFAlgo8, load_progress
if __name__ == '__main__':
while True:
values = BFAlgo8.generate_all_values()
progress = load_progress()
print('Cur progress: {}/{}'.format(progress, len(values)))
if progress == len(values):
break
p = Process(target=start)
p.start()
p.join() |
test_threaded_import.py | # This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import threading
import unittest
from unittest import mock
from test.support import (
verbose, import_module, run_unittest, TESTFN, reap_threads,
forget, unlink, rmtree, start_threads)
def task(N, done, done_tasks, errors):
try:
# We don't use modulefinder but still import it in order to stress
# importing of different modules from several threads.
if len(done_tasks) % 2:
import modulefinder
import random
else:
import random
import modulefinder
# This will fail if random is not completely initialized
x = random.randrange(1, 3)
except Exception as e:
errors.append(e.with_traceback(None))
finally:
done_tasks.append(threading.get_ident())
finished = len(done_tasks) == N
if finished:
done.set()
def mock_register_at_fork(func):
# bpo-30599: Mock os.register_at_fork() when importing the random module,
# since this function doesn't allow to unregister callbacks and would leak
# memory.
return mock.patch('os.register_at_fork', create=True)(func)
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
"""A dummy finder to detect concurrent access to its find_spec()
method."""
def __init__(self):
self.numcalls = 0
self.x = 0
self.lock = threading.Lock()
def find_spec(self, name, path=None, target=None):
# Simulate some thread-unsafe behaviour. If calls to find_spec()
# are properly serialized, `x` will end up the same as `numcalls`.
# Otherwise not.
assert imp.lock_held()
with self.lock:
self.numcalls += 1
x = self.x
time.sleep(0.01)
self.x = x + 1
class FlushingFinder:
"""A dummy finder which flushes sys.path_importer_cache when it gets
called."""
def find_spec(self, name, path=None, target=None):
sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
@mock_register_at_fork
def check_parallel_module_init(self, mock_os):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
t0 = time.monotonic()
with start_threads(threading.Thread(target=task,
args=(N, done, done_tasks, errors,))
for i in range(N)):
pass
completed = done.wait(10 * 60)
dt = time.monotonic() - t0
if verbose:
print("%.1f ms" % (dt*1e3), flush=True, end=" ")
dbg_info = 'done: %s/%s' % (len(done_tasks), N)
self.assertFalse(errors, dbg_info)
self.assertTrue(completed, dbg_info)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.insert(0, finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_spec('')
raise ImportError
sys.path_hooks.insert(0, path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_spec('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.threaded_import_hangers']
except KeyError:
pass
import test.threaded_import_hangers
self.assertFalse(test.threaded_import_hangers.errors)
def test_circular_imports(self):
# The goal of this test is to exercise implementations of the import
# lock which use a per-module lock, rather than a global lock.
# In these implementations, there is a possible deadlock with
# circular imports, for example:
# - thread 1 imports A (grabbing the lock for A) which imports B
# - thread 2 imports B (grabbing the lock for B) which imports A
# Such implementations should be able to detect such situations and
# resolve them one way or the other, without freezing.
# NOTE: our test constructs a slightly less trivial import cycle,
# in order to better stress the deadlock avoidance mechanism.
delay = 0.5
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
for name, contents in circular_imports_modules.items():
contents = contents % {'delay': delay}
with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
f.write(contents.encode('utf-8'))
self.addCleanup(forget, name)
importlib.invalidate_caches()
results = []
def import_ab():
import A
results.append(getattr(A, 'x', None))
def import_ba():
import B
results.append(getattr(B, 'x', None))
t1 = threading.Thread(target=import_ab)
t2 = threading.Thread(target=import_ba)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(set(results), {'a', 'b'})
@mock_register_at_fork
def test_side_effect_import(self, mock_os):
code = """if 1:
import threading
def target():
import random
t = threading.Thread(target=target)
t.start()
t.join()
t = None"""
sys.path.insert(0, os.curdir)
self.addCleanup(sys.path.remove, os.curdir)
filename = TESTFN + ".py"
with open(filename, "wb") as f:
f.write(code.encode('utf-8'))
self.addCleanup(unlink, filename)
self.addCleanup(forget, TESTFN)
self.addCleanup(rmtree, '__pycache__')
importlib.invalidate_caches()
__import__(TESTFN)
del sys.modules[TESTFN]
@reap_threads
def test_main():
old_switchinterval = None
try:
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
except AttributeError:
pass
try:
run_unittest(ThreadedImportTests)
finally:
if old_switchinterval is not None:
sys.setswitchinterval(old_switchinterval)
if __name__ == "__main__":
test_main()
|
main.py | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from enum import Enum
import logging
import os
from time import sleep
from threading import Thread
from typing import Optional
import pickle
from retry.api import retry_call
import tensorflow as tf
from experiment_metrics.api import publish
import grpc
from kubernetes import config, client
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
PROGRESS_METRIC_KEY = 'progress'
API_GROUP_NAME = 'aggregator.aipg.intel.com'
RUN_PLURAL = 'runs'
RUN_VERSION = 'v1'
LABEL_KEY = "label"
RESULT_KEY = "result"
progress = 0
max_progress = 1
stop_thread = False
log_level_env_var = os.getenv('LOG_LEVEL')
class APPLICABLE_FORMATS(Enum):
TF_RECORD = "tf-record"
if log_level_env_var:
desired_log_level = logging.getLevelName(log_level_env_var.upper())
if desired_log_level not in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):
desired_log_level = logging.INFO
else:
desired_log_level = logging.INFO
logging.basicConfig(level=desired_log_level)
# CAN-1237 - by setting level of logs for k8s rest client to INFO I'm removing displaying content of
# every rest request sent by k8s client
k8s_rest_logger = logging.getLogger('kubernetes.client.rest')
k8s_rest_logger.setLevel(logging.INFO)
def do_batch_inference(server_address: str, input_dir_path: str, output_dir_path: str, related_run_name: str,
input_format: str):
detected_files = []
for root, _, files in os.walk(input_dir_path):
for name in files:
detected_files.append(os.path.join(root, name))
detected_files.sort()
channel = grpc.insecure_channel(server_address)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
global max_progress
global progress
max_progress = len(detected_files)
reverted_progress = try_revert_progress(related_run_name)
if reverted_progress:
logging.debug(f"new progress for processing: {progress}")
progress = reverted_progress
else:
logging.debug("no progress reverted")
files_to_process = detected_files[progress:]
for data_file in files_to_process:
logging.debug(f"processing file: {data_file}")
if input_format == APPLICABLE_FORMATS.TF_RECORD.value:
# materialize the iterator so the record count is available (a tf_record_iterator has no len())
records = list(tf.python_io.tf_record_iterator(path=data_file))
id = 0
# if tf-record input format is chosen, results are stored in Python list containing dictionary items
# each item contains label (key - label) and binary object (key - result)
output_list = []
filename, _ = os.path.splitext(data_file)
for string_record in records:
example = tf.train.Example()
example.ParseFromString(string_record)
label = example.features.feature['label'].bytes_list.value[0].decode('utf_8') \
if example.features.feature.get('label') else None
if not label:
label = data_file
if len(records) > 1:
label = "{}_{}".format(filename, id)
id += 1
binary_result = make_prediction(input=example.features.feature['data_pb'].bytes_list.value[0],
stub=stub)
output_list.append({LABEL_KEY: label, RESULT_KEY: binary_result})
output_filename = "{}.result".format(data_file)
with open(f'{output_dir_path}/{os.path.basename(output_filename)}', mode='wb') as fi:
pickle.dump(obj=output_list, file=fi, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open(data_file, mode='rb') as fi:
pb_bytes = fi.read()
make_prediction(input=pb_bytes,
stub=stub,
output_filename=data_file,
output_dir_path=output_dir_path)
progress += 1
logging.info(f'progress: {progress}/{max_progress}')
def build_label_from_filename(filename: str, id: int):
name, _ = os.path.splitext(filename)
return "{}_{}".format(name, id)
def make_prediction(input: bytes, stub: prediction_service_pb2_grpc.PredictionServiceStub,
output_filename: str = None, output_dir_path: str = None):
request = predict_pb2.PredictRequest()
try:
request.ParseFromString(input)
except Exception as ex:
raise RuntimeError(f"failed to parse {output_filename}") from ex
# actual call without retry:
# result = stub.Predict(request, timeout=30.0) # timeout 30 seconds
result = retry_call(stub.Predict, fargs=[request], fkwargs={"timeout": 30.0}, tries=5, delay=30)
result_pb_serialized: bytes = result.SerializeToString()
if output_filename:
with open(f'{output_dir_path}/{os.path.basename(output_filename)}', mode='wb') as fi:
fi.write(result_pb_serialized)
return result_pb_serialized
def publish_progress():
logging.debug("starting publish_progress ...")
progress_percent = 0
while progress_percent != 100 and not stop_thread:
new_progress_percent = progress/max_progress * 100 if max_progress else 100
logging.debug(f"new_progress_percent: %.1f" % new_progress_percent)
if new_progress_percent != progress_percent:
progress_percent = new_progress_percent
metrics = {
PROGRESS_METRIC_KEY: str("%.1f" % progress_percent)
}
logging.debug("publishing metrics ...")
publish(metrics)
sleep(1)
def try_revert_progress(run_name: str) -> Optional[int]:
logging.debug("trying to revert progress...")
config.load_incluster_config()
with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", mode='r') as file:
my_current_namespace = file.read()
if not my_current_namespace:
raise RuntimeError(f"error reading my current namespace {str(my_current_namespace)}")
runs_custom_obj_client = client.CustomObjectsApi()
try:
run = runs_custom_obj_client.get_namespaced_custom_object(group=API_GROUP_NAME, version=RUN_VERSION,
plural=RUN_PLURAL, namespace=my_current_namespace,
name=run_name)
except Exception:
logging.exception("error when contacting to kubernetes API")
return None
try:
saved_progress: str = run['spec']['metrics']['progress']
except KeyError:
logging.debug(f"no progress metric detected")
return None
logging.debug(f"progress reverted! progress from metrics: {saved_progress}")
saved_progress: float = float(saved_progress)
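    # The stored metric is a percentage; convert it back into a file count so
    # processing resumes at the right offset into the sorted file list.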
real_progress = saved_progress/100 * max_progress if max_progress else 100
progress = int(real_progress)
return progress
def main():
related_run_name = os.getenv('RUN_NAME')
if not related_run_name:
raise RuntimeError('RUN_NAME env var must be set for publishing progress metrics!')
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir_path', type=str)
parser.add_argument('--output_dir_path', type=str)
parser.add_argument('--input_format', type=str)
args = parser.parse_args()
if not args.input_dir_path:
parser.error("'input_dir_path' is required!")
input_dir_path = args.input_dir_path
output_dir_path = args.output_dir_path if args.output_dir_path else '/mnt/output/experiment'
input_format = args.input_format
if not os.path.isdir(input_dir_path) or len(os.listdir(input_dir_path)) == 0:
raise RuntimeError(f"input directory: '{input_dir_path}' does not exist or is empty!")
progress_thread = Thread(target=publish_progress)
progress_thread.start()
try:
do_batch_inference(server_address=os.getenv('TENSORFLOW_MODEL_SERVER_SVC_NAME', ''),
input_dir_path=input_dir_path,
output_dir_path=output_dir_path,
related_run_name=related_run_name,
input_format=input_format)
except Exception:
global stop_thread
stop_thread = True
raise
if __name__ == '__main__':
main()
|
transfer_supports.py | import os
import io
import sys
import html
import socket
import urllib
from os.path import isfile, isdir
from functools import partial
from http.server import test, SimpleHTTPRequestHandler
from http import HTTPStatus
from multiprocessing import Process
__all__ = ['startTransferSever', 'get_local_ip', 'TransferSeverState']
def get_local_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
return ip
finally:
s.close()
class TransferSever(SimpleHTTPRequestHandler):
def generate_li(self, href, base_name, *, is_file=None, is_dir=None, is_link=None):
        if is_file:
            ext = base_name.split('.')[-1].lower()
            if ext in ('png', 'jpg', 'jpeg', 'bmp', 'ico', 'svg'):  # image files
                icon = 'fa-file-image-o'
            elif ext in ('mp4', 'mpeg', 'avi', 'navi', 'asf', 'mov', '3gp', 'wmv', 'divx', 'xvid', 'rm', 'rmvb', 'flv'):
                icon = 'fa-file-video-o'
            elif ext in ('txt', 'cfg', 'ini', 'tmpl', 'log'):
                icon = 'fa-file-text-o'
            elif ext in ('xlsx', 'xlsm', 'xls', 'xlst', 'xlsb'):
                icon = 'fa-file-excel-o'
            elif ext in ('doc', 'docx', 'dotx', 'dot', 'docm', 'dotm', 'xps', 'mht', 'mhtml', 'rtf', 'xml', 'odt'):
                icon = 'fa-file-word-o'
            elif ext in ('bat', 'py', 'c', 'cpp', 'js', 'ts', 'dart', 'java', 'pyc', 'pyd', 'class', 'pyw', 'sh', 'pyi', 'vba', 'vbs', 'vb'):
                icon = 'fa-file-code-o'
            elif ext in ('mp3', 'flac', 'cd', 'wave', 'aiff', 'mpeg-4', 'midi', 'wma', 'amr', 'ape', 'aac'):
                icon = 'fa-music'
            elif ext in ('pdf', ):
                icon = 'fa-file-pdf-o'
            elif ext in ('zip', 'rar', '7z'):
                icon = 'fa-file-archive-o'
            elif ext in ('html', 'html5', 'htmlx'):
                icon = 'fa-html5'
            else:
                icon = 'fa-file-o'
        elif is_dir:
            icon = 'fa-folder-o'
        elif is_link:
            icon = 'fa-link'
        else:
            icon = 'fa-file-o'  # fall back so icon is always defined
if is_file:
li = '<li><a href="%s" download=""><i class="fa %s fa-fw" aria-hidden="true" style="color:gray"></i>%s</a></li>' % (
href, icon, base_name)
else:
li = '<li><a href="%s"><i class="fa %s fa-fw" aria-hidden="true" style="color:gray"></i>%s</a></li>' % (
href, icon, base_name)
return li
def list_directory(self, path):
try:
list = os.listdir(path)
except OSError:
self.send_error(
HTTPStatus.NOT_FOUND,
'No permission to list directory')
return None
list.sort(key=lambda a: a.lower())
r = []
enc = sys.getfilesystemencoding()
        title = 'Current directory %s' % os.path.dirname(path).replace('\\', '/')
r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd">')
r.append('<html>\n<head>')
r.append('<meta http-equiv="Content-Type" '
'content="text/html; charset=%s">' % enc)
r.append(
'<link rel="stylesheet" href="https://cdn.bootcss.com/font-awesome/4.7.0/css/font-awesome.css">')
        r.append('<title>File Transfer</title>\n</head>')
r.append('<body>\n<h2>%s</h2>' % title)
r.append('<hr>\n<ol>')
        for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
if os.path.isdir(fullname):
displayname = name + '/'
linkname = name + '/'
href = urllib.parse.quote(linkname, errors='surrogatepass')
base_name = html.escape(displayname, quote=False)
r.append(self.generate_li(href, base_name, is_dir=True))
if os.path.islink(fullname):
displayname = name + '@'
href = urllib.parse.quote(linkname, errors='surrogatepass')
base_name = html.escape(displayname, quote=False)
r.append(self.generate_li(href, base_name, is_link=True))
if os.path.isfile(fullname):
href = urllib.parse.quote(linkname, errors='surrogatepass')
base_name = html.escape(displayname, quote=False)
value = self.generate_li(href, base_name, is_file=True)
r.append(value)
r.append('</ol>\n<hr>\n</body>\n</html>\n')
encoded = '\n'.join(r).encode(enc, 'surrogateescape')
f = io.BytesIO()
f.write(encoded)
f.seek(0)
self.send_response(HTTPStatus.OK)
self.send_header('Content-type', 'text/html; charset=%s' % enc)
self.send_header('Content-Length', str(len(encoded)))
self.end_headers()
return f
@staticmethod
def run(directory):
handler_class = partial(TransferSever, directory=directory)
test(HandlerClass=handler_class, port=80)
class TransferSeverState(object):
def __init__(self, process=None):
self.process = process
def set_process(self, p):
self.process = p
def is_alive(self):
if self.process is None:
return False
return self.process.is_alive()
def terminte_process(self):
if self.process:
self.process.terminate()
self.process = None
def _run_server(directory: str):
TransferSever.run(directory)
def startTransferSever(directory: str) -> Process:
process = Process(target=_run_server, args=(directory, ))
process.daemon = True
process.start()
return process
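# Minimal usage sketch (hypothetical paths; port 80 is hard-coded in run(), so
# this typically needs elevated privileges):
#   state = TransferSeverState(startTransferSever('/tmp/share'))
#   print('Serving on http://%s/' % get_local_ip())
#   ...
#   state.terminte_process()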
|
test_framework.py | from __future__ import print_function
class AssertException(Exception):
pass
def format_message(message):
return message.replace("\n", "<:LF:>")
def display(type, message, label="", mode=""):
print("\n<{0}:{1}:{2}>{3}".format(
type.upper(), mode.upper(), label, format_message(message)))
def expect(passed=None, message=None, allow_raise=False):
if passed:
display('PASSED', 'Test Passed')
else:
message = message or "Value is not what was expected"
display('FAILED', message)
if allow_raise:
raise AssertException(message)
def assert_equals(actual, expected, message=None, allow_raise=False):
equals_msg = "{0} should equal {1}".format(repr(actual), repr(expected))
if message is None:
message = equals_msg
else:
message += ": " + equals_msg
expect(actual == expected, message, allow_raise)
def assert_not_equals(actual, expected, message=None, allow_raise=False):
r_actual, r_expected = repr(actual), repr(expected)
equals_msg = "{0} should not equal {1}".format(r_actual, r_expected)
if message is None:
message = equals_msg
else:
message += ": " + equals_msg
expect(not (actual == expected), message, allow_raise)
def expect_error(message, function, exception=Exception):
passed = False
try:
function()
except exception:
passed = True
except Exception:
pass
expect(passed, message)
def expect_no_error(message, function, exception=BaseException):
try:
function()
except exception as e:
fail("{}: {}".format(message or "Unexpected exception", repr(e)))
return
except Exception:
pass
pass_()
def pass_(): expect(True)
def fail(message): expect(False, message)
def assert_approx_equals(
actual, expected, margin=1e-9, message=None, allow_raise=False):
msg = "{0} should be close to {1} with absolute or relative margin of {2}"
equals_msg = msg.format(repr(actual), repr(expected), repr(margin))
if message is None:
message = equals_msg
else:
message += ": " + equals_msg
div = max(abs(actual), abs(expected), 1)
expect(abs((actual - expected) / div) < margin, message, allow_raise)
'''
Usage:
@describe('describe text')
def describe1():
@it('it text')
def it1():
# some test cases...
'''
def _timed_block_factory(opening_text):
from timeit import default_timer as timer
from traceback import format_exception
from sys import exc_info
def _timed_block_decorator(s, before=None, after=None):
display(opening_text, s)
def wrapper(func):
if callable(before):
before()
time = timer()
try:
func()
except Exception:
fail('Unexpected exception raised')
tb_str = ''.join(format_exception(*exc_info()))
display('ERROR', tb_str)
display('COMPLETEDIN', '{:.2f}'.format((timer() - time) * 1000))
if callable(after):
after()
return wrapper
return _timed_block_decorator
describe = _timed_block_factory('DESCRIBE')
it = _timed_block_factory('IT')
'''
Timeout utility
Usage:
@timeout(sec)
def some_tests():
any code block...
Note: Timeout value can be a float.
'''
def timeout(sec):
def wrapper(func):
from multiprocessing import Process
msg = 'Should not throw any exceptions inside timeout'
def wrapped():
expect_no_error(msg, func)
process = Process(target=wrapped)
process.start()
process.join(sec)
if process.is_alive():
fail('Exceeded time limit of {:.3f} seconds'.format(sec))
process.terminate()
process.join()
return wrapper
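# Minimal usage sketch of the timeout decorator (assumes the wrapped function
# is picklable, i.e. defined at module level, since it runs in a child Process):
#   @timeout(0.5)
#   def quick_test():
#       assert_equals(1 + 1, 2)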
|
networking.py | """
Defines helper methods useful for setting up ports, launching servers, and
creating tunnels.
"""
from __future__ import annotations
import http
import json
import os
import socket
import threading
import time
import urllib.parse
import urllib.request
from typing import TYPE_CHECKING, Optional, Tuple
import fastapi
import requests
import uvicorn
from gradio import queueing
from gradio.routes import app
from gradio.tunneling import create_tunnel
if TYPE_CHECKING: # Only import for type checking (to avoid circular imports).
from gradio import Interface
# By default, the local server will try to open on localhost, port 7860.
# If that is not available, then it will try 7861, 7862, ... 7959.
INITIAL_PORT_VALUE = int(os.getenv("GRADIO_SERVER_PORT", "7860"))
TRY_NUM_PORTS = int(os.getenv("GRADIO_NUM_PORTS", "100"))
LOCALHOST_NAME = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
class Server(uvicorn.Server):
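    # uvicorn's default install_signal_handlers registers signal handlers,
    # which only works on the main thread; the no-op override below lets
    # run_in_thread() start the server from a background thread.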
def install_signal_handlers(self):
pass
def run_in_thread(self):
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
while not self.started:
time.sleep(1e-3)
def close(self):
self.should_exit = True
self.thread.join()
def get_first_available_port(initial: int, final: int) -> int:
"""
Gets the first open port in a specified range of port numbers
Parameters:
initial: the initial value in the range of port numbers
final: final (exclusive) value in the range of port numbers, should be greater than `initial`
Returns:
port: the first open port in the range
"""
for port in range(initial, final):
try:
s = socket.socket() # create a socket object
s.bind((LOCALHOST_NAME, port)) # Bind to the port
s.close()
return port
except OSError:
pass
raise OSError(
"All ports from {} to {} are in use. Please close a port.".format(
initial, final
)
)
def start_server(
interface: Interface,
server_name: Optional[str] = None,
server_port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
) -> Tuple[int, str, fastapi.FastAPI, Server]:
"""Launches a local server running the provided Interface
Parameters:
interface: The interface object to run on the server
server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT.
        auth: read from `interface.auth`: username and password (or list of username-password tuples) required to access the interface, or a function that takes username and password and returns True for a valid login.
ssl_keyfile: If a path to a file is provided, will use this as the private key file to create a local server running on https.
ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
"""
server_name = server_name or LOCALHOST_NAME
# if port is not specified, search for first available port
if server_port is None:
port = get_first_available_port(
INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
)
else:
try:
s = socket.socket()
s.bind((LOCALHOST_NAME, server_port))
s.close()
except OSError:
raise OSError(
"Port {} is in use. If a gradio.Interface is running on the port, you can close() it or gradio.close_all().".format(
server_port
)
)
port = server_port
url_host_name = "localhost" if server_name == "0.0.0.0" else server_name
if ssl_keyfile is not None:
if ssl_certfile is None:
raise ValueError(
"ssl_certfile must be provided if ssl_keyfile is provided."
)
path_to_local_server = "https://{}:{}/".format(url_host_name, port)
else:
path_to_local_server = "http://{}:{}/".format(url_host_name, port)
auth = interface.auth
if auth is not None:
if not callable(auth):
app.auth = {account[0]: account[1] for account in auth}
else:
app.auth = auth
else:
app.auth = None
app.interface = interface
app.cwd = os.getcwd()
app.favicon_path = interface.favicon_path
app.tokens = {}
if app.interface.enable_queue:
if auth is not None or app.interface.encrypt:
raise ValueError("Cannot queue with encryption or authentication enabled.")
queueing.init()
app.queue_thread = threading.Thread(
target=queueing.queue_thread, args=(path_to_local_server,)
)
app.queue_thread.start()
if interface.save_to is not None: # Used for selenium tests
interface.save_to["port"] = port
config = uvicorn.Config(
app=app,
port=port,
host=server_name,
log_level="warning",
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
)
server = Server(config=config)
server.run_in_thread()
return port, path_to_local_server, app, server
def setup_tunnel(local_server_port: int, endpoint: str) -> str:
response = requests.get(
endpoint + "/v1/tunnel-request" if endpoint is not None else GRADIO_API_SERVER
)
if response and response.status_code == 200:
try:
payload = response.json()[0]
return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
except Exception as e:
raise RuntimeError(str(e))
else:
raise RuntimeError("Could not get share link from Gradio API Server.")
def url_ok(url: str) -> bool:
try:
for _ in range(5):
time.sleep(0.500)
r = requests.head(url, timeout=3, verify=False)
if r.status_code in (200, 401, 302): # 401 or 302 if auth is set
return True
except (ConnectionError, requests.exceptions.ConnectionError):
return False
|
camera.py | from threading import Thread, Lock
import cv2
class CameraStream(object):
"""
Threaded class for capturing camera stream quickly in real time, media decoder: FFMPEG
objective: to capture each frame smoothly without any delay
output: return each frame
"""
def __init__(self, src=0, width=800, height=600):
"""
constructor function of class CameraStream
input:
src: type - int/string, source of camera. int if webcam else string if ipcam/ptz/other stream sources, default 0 for webcam
width: type - int, camera feed width
height: type - int, camera feed height
output:
none
"""
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
self.started = False
self.read_lock = Lock()
def start(self):
"""
thread start function of class CameraStream
input: none
output: return to self instance
"""
if self.started:
print("Camera Reading Thread Already Started!")
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
    def update(self):
        """
        worker loop for continuous frame reading of class CameraStream
        input: none
        output: none (runs until stop() clears self.started)
        """
        while self.started:
            (grabbed, frame) = self.stream.read()
            with self.read_lock:
                self.grabbed, self.frame = grabbed, frame
def read(self):
"""
        returns the latest grabbed frame to the caller of class CameraStream
input: none
output:
ret: type - bool, if camera stream available return true, return false otherwise
frame: type - np.array, return numpy array of each frame.
"""
self.read_lock.acquire()
ret = self.grabbed
if ret:
frame = self.frame.copy()
else:
frame = None
self.read_lock.release()
return ret, frame
def stop(self):
"""
        stops the reader thread of CameraStream and releases the stream
input: none
output: none
"""
self.started = False
self.thread.join()
self.stream.release()
def __exit__(self, exc_type, exc_value, traceback):
"""
        context manager exit of CameraStream; releases the capture stream
"""
self.stream.release()
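# Minimal usage sketch for CameraStream (assumes a local webcam at index 0):
#   cam = CameraStream(0).start()
#   ret, frame = cam.read()
#   if ret:
#       cv2.imshow('frame', frame)
#   cam.stop()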
class GstStream(object):
"""
Threaded class for capturing camera stream quickly in real time, media decoder: Gstreamer
objective: to capture each frame smoothly without any delay
output: return each frame
"""
def __init__(self, src=0, width=800, height=600):
"""
        constructor function of class GstStream
input:
src: type - int/string, source of camera. int if webcam else string if ipcam/ptz/other stream sources, default 0 for webcam
width: type - int, camera feed width
height: type - int, camera feed height
output:
none
"""
self.stream = cv2.VideoCapture(src, cv2.CAP_GSTREAMER)
# self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
# self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
self.started = False
self.read_lock = Lock()
def start(self):
"""
        thread start function of class GstStream
input: none
output: return to self instance
"""
if self.started:
print("Camera Reading Thread Already Started!")
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
    def update(self):
        """
        worker loop for continuous frame reading of class GstStream
        input: none
        output: none (runs until stop() clears self.started)
        """
        while self.started:
            (grabbed, frame) = self.stream.read()
            with self.read_lock:
                self.grabbed, self.frame = grabbed, frame
def read(self):
"""
        returns the latest grabbed frame to the caller of class GstStream
input: none
output:
ret: type - bool, if camera stream available return true, return false otherwise
frame: type - np.array, return numpy array of each frame.
"""
self.read_lock.acquire()
ret = self.grabbed
if ret:
frame = self.frame.copy()
else:
frame = None
self.read_lock.release()
return ret, frame
def stop(self):
"""
        stops the reader thread of GstStream and releases the stream
input: none
output: none
"""
self.started = False
self.thread.join()
self.stream.release()
def __exit__(self, exc_type, exc_value, traceback):
"""
        context manager exit of GstStream; releases the capture stream
"""
self.stream.release() |
test_task.py | import os
import threading
import pytest
from ddtrace import compat
from ddtrace.internal import nogevent
from ddtrace.profiling.collector import _task
TESTING_GEVENT = os.getenv("DD_PROFILE_TEST_GEVENT", False)
def test_get_task_main():
# type: (...) -> None
if _task._gevent_tracer is None:
assert _task.get_task(nogevent.main_thread_id) == (None, None, None)
else:
assert _task.get_task(nogevent.main_thread_id) == (compat.main_thread.ident, "MainThread", None)
@pytest.mark.skipif(TESTING_GEVENT, reason="only works without gevent")
def test_list_tasks_nogevent():
assert _task.list_tasks() == []
@pytest.mark.skipif(not TESTING_GEVENT, reason="only works with gevent")
def test_list_tasks_gevent():
l1 = threading.Lock()
l1.acquire()
def wait():
l1.acquire()
l1.release()
def nothing():
pass
t1 = threading.Thread(target=wait, name="t1")
t1.start()
tasks = _task.list_tasks()
    # can't check == 2 because there are leftovers from other tests
assert len(tasks) >= 2
main_thread_found = False
t1_found = False
for task in tasks:
assert len(task) == 3
# main thread
if task[0] == compat.main_thread.ident:
assert task[1] == "MainThread"
assert task[2] is None
main_thread_found = True
# t1
elif task[0] == t1.ident:
assert task[1] == "t1"
assert task[2] is not None
t1_found = True
l1.release()
t1.join()
assert t1_found
assert main_thread_found
|
chatClient.py | #!/usr/bin/env python3
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import sys
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
sys.stdout.flush()
print('{}\n'.format(msg))
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
while True:
msg = input('')
client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
            break
def on_closing(event=None):
    """Leftover from the Tk GUI version; sends the quit message directly."""
    client_socket.send(bytes("{quit}", "utf8"))
    client_socket.close()
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not HOST:
HOST = '99.5.124.116'
if not PORT:
PORT = 44000
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
send_thread = Thread(target=send)
send_thread.start()
# tkinter.mainloop() # Starts GUI execution.
|
A3C_RNN.py | """
Asynchronous Advantage Actor Critic (A3C) + RNN with continuous action space, Reinforcement Learning.
The Pendulum example.
Using:
tensorflow
gym
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 200
MAX_GLOBAL_EP = 1500
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * tf.stop_gradient(td)
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=[0, 1]), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('critic'): # only critic controls the rnn update
cell_size = 64
s = tf.expand_dims(self.s, axis=1,
name='timely_input') # [time_step, feature] => [time_step, batch, feature]
rnn_cell = tf.contrib.rnn.BasicRNNCell(cell_size)
self.init_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
outputs, self.final_state = tf.nn.dynamic_rnn(
cell=rnn_cell, inputs=s, initial_state=self.init_state, time_major=True)
cell_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs') # joined state representation
l_c = tf.layers.dense(cell_out, 50, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
with tf.variable_scope('actor'): # state representation is based on critic
l_a = tf.layers.dense(cell_out, 80, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s, cell_state): # run by a local
s = s[np.newaxis, :]
a, cell_state = SESS.run([self.A, self.final_state], {self.s: s, self.init_state: cell_state})
return a, cell_state
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
rnn_state = SESS.run(self.AC.init_state) # zero rnn state at beginning
keep_state = rnn_state.copy() # keep rnn state for updating global net
for ep_t in range(MAX_EP_STEP):
if self.name == 'W_0':
self.env.render()
a, rnn_state_ = self.AC.choose_action(s, rnn_state) # get the action and next rnn state
s_, r, done, info = self.env.step(a)
                done = ep_t == MAX_EP_STEP - 1
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r+8)/8) # normalize
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :], self.AC.init_state: rnn_state_})[0, 0]
buffer_v_target = []
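                        # Compute n-step discounted returns by walking the
                        # reward buffer backwards from the bootstrap value:
                        # R_t = r_t + GAMMA * R_{t+1}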
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
self.AC.init_state: keep_state,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
keep_state = rnn_state_.copy() # replace the keep_state as the new initial rnn state_
s = s_
rnn_state = rnn_state_ # renew rnn state
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
        t = threading.Thread(target=worker.work)  # bind the method directly; a lambda would late-bind the loop variable
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
|
k2_blockdevice_api.py | """ This is k2_blockdevice_api docstring """
import logging
import platform
import uuid
import threading
import bitmath
from flocker.node.agents import blockdevice
from zope.interface import implementer
from twisted.python import filepath
from kaminario_flocker_driver.utils.k2_api_client import K2StorageCenterApi, \
StorageDriverAPIException, InvalidDataException, ImproperConfigurationError
from kaminario_flocker_driver.constants import UNLIMITED_QUOTA, \
VG_PREFIX, VOL_PREFIX, LEN_OF_DATASET_ID, RETRIES
import eliot
LOG = logging.getLogger(__name__)
class K2BlockDriverLogHandler(logging.Handler):
"""Python log handler to route to Eliot logging."""
def emit(self, record):
"""Writes log message to the stream.
:param record: The record to be logged.
"""
msg = self.format(record)
eliot.Message.new(
message_type="flocker:node:agents:blockdevice:k2storagecenter",
message_level=record.levelname,
message=msg).write()
def instantiate_driver_instance(cluster_id, **config):
"""Instantiate a new K2 Block device driver instance.
:param cluster_id: The Flocker cluster ID.
:param config: The Flocker Driver configuration settings.
:return: A new StorageCenterBlockDeviceAPI object.
"""
# Configure log routing to the Flocker Eliot logging
root_logger = logging.getLogger()
root_logger.addHandler(K2BlockDriverLogHandler())
root_logger.setLevel(logging.DEBUG)
config['cluster_id'] = cluster_id
return K2BlockDeviceAPI(**config)
@implementer(blockdevice.IBlockDeviceAPI)
class K2BlockDeviceAPI(object):
"""Block device driver for Kaminario (K2) Storage device.
A "IBlockDeviceAPI" for interacting with Storage Center
array storage.
"""
def __init__(self, **kwargs):
"""Initialize new instance of the driver.
:param config: The driver configuration settings
such as host, port, username, password.
:param cluster_id: The cluster ID is running.
:param is_dedup: The flag to be set for dedup activation.
        :param destroy_host: The flag to set for destroying the host if none
        of its volumes are mapped.
"""
self.cluster_id = kwargs.get('cluster_id')
self.instance_name = None
self.api_client = K2StorageCenterApi(kwargs['storage_host'],
kwargs['username'],
kwargs['password'],
kwargs.get('is_ssl', False),
kwargs.get('retries', RETRIES))
# Created single instance of krest
self.krest = self.api_client.connect_to_api()
self.is_dedup = kwargs.get("is_dedup")
if self.is_dedup:
self.is_dedup = self.api_client.is_true(
self.is_dedup)
else:
raise ImproperConfigurationError(
"'is_dedup' attribute is not set in agent.yml file.")
self.destroy_host = kwargs.get('destroy_host', False)
if self.destroy_host:
self.destroy_host = self.api_client.is_true(
self.destroy_host)
def _return_to_block_device_volume(self, volume, attached_to=None):
"""Converts K2 API volume to a `BlockDeviceVolume`.
With the help of blockdevice_id OS can uniquely identify volume/device
being referenced by Flocker.
K2 API returns SCSI Serial number(Page 0x80)(scsi_sn) as
unique identification of volume.
"""
dataset_id = uuid.UUID('{00000000-0000-0000-0000-000000000000}')
try:
# volume name has a prefix and dataset_id
# Assumption: dataset id is of 36 chars.
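            # e.g. volume names look like u"<VOL_PREFIX>-<uuid>", so the last
            # LEN_OF_DATASET_ID (36) characters hold the dataset UUID.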
dataset_id = uuid.UUID("{0}".
format(volume.name)[-LEN_OF_DATASET_ID:])
except ValueError:
pass
ret_val = blockdevice.BlockDeviceVolume(
blockdevice_id=volume.scsi_sn,
size=int(self.api_client.kib_to_bytes(volume.size)),
attached_to=attached_to,
dataset_id=dataset_id)
return ret_val
def _iscsi_rescan(self, process):
"""Performs a SCSI rescan on this host."""
rescan_thread = threading.Thread(target=self.api_client.rescan_iscsi)
rescan_thread.name = '{0}_rescan'.format(process)
rescan_thread.daemon = True
rescan_thread.start()
@staticmethod
def allocation_unit():
"""Gets the minimum allocation unit for our K2 backend.
The K2 recommended minimum is 1 GiB per volume.
:returns: 1 GiB in bytes.
"""
return bitmath.GiB(1).bytes
def compute_instance_id(self):
"""Gets an identifier for this node.
This will be compared against ``BlockDeviceVolume.attached_to``
to determine which volumes are locally attached and it will be used
with ``attach_volume`` to locally attach volumes.
For K2, we use the node's hostname as the identifier.
:returns: A ``unicode`` object giving a provider-specific node
identifier which identifies the node where the method
is run.
"""
if not self.instance_name:
self.instance_name = unicode(platform.uname()[1])
return self.instance_name
def _create_new_host(self, attach_to):
"""Create a new host on K2 array
:param attach_to: It is a hostname of node which is returned
by "compute_instance_id" method.
:return: new host
"""
# Currently host groups not supported.
# Standalone host creation supported only
# TODO: This is for host group creation
# hg_name = u"{}_group".format(attach_to)
# hg = self.krest.search("host_groups", name=hg_name)
# if hg.total == 0:
# hg = self.krest.new("host_groups", name=hg_name).save()
# else:
# hg = hg.hits[0]
# host = self.krest.new("hosts", name=attach_to,
# type=self.api_client.get_host_type,
# host_group=hg).save()
host = self.krest.new("hosts", name=attach_to,
type=self.api_client.host_type).save()
LOG.info("Created new host %s", host)
return host
@staticmethod
def _map_host_with_iqn(iqn_obj, host):
""" Save or map the host with iqn.
:param iqn_obj: iqn object from krest
:param host: host object from krest
:return: host_iqn object
"""
host_iqns = iqn_obj
host_iqns.host = host
host_iqns.save()
LOG.info("Saved iqn with host server")
return host_iqns
def create_volume(self, dataset_id, size):
"""Create a new volume on the K2 array.
:param dataset_id: The Flocker dataset ID for the volume.
:param size: The size of the new volume in bytes.
:return: A ``BlockDeviceVolume``
"""
sc_volume = {}
volume_group = u"{}-{}".format(VG_PREFIX, dataset_id)
volume_name = u"{}-{}".format(VOL_PREFIX, dataset_id)
volume_size = self.api_client.bytes_to_kib(size)
try:
sc_volume_group = self.krest.new("volume_groups",
name=volume_group,
quota=UNLIMITED_QUOTA,
is_dedup=self.is_dedup).save()
except Exception as e:
raise StorageDriverAPIException('Error creating volume group:'
' {}'.format(e.message))
if sc_volume_group:
try:
sc_volume = self.krest.new("volumes",
name=volume_name,
size=volume_size,
volume_group=sc_volume_group).save()
except Exception:
raise StorageDriverAPIException('Error creating volume.')
return self._return_to_block_device_volume(sc_volume)
def create_volume_with_profile(self, dataset_id, size, profile_name=None):
"""Create a new volume on the array.
:param dataset_id: The Flocker dataset ID for the volume.
:param size: The size of the new volume in bytes.
:param profile_name: The name of the storage profile for
this volume.
:return: A ``BlockDeviceVolume``
"""
pass
def attach_volume(self, blockdevice_id, attach_to):
"""Attach an existing volume to an initiator (host).
:param blockdevice_id: The unique identifier(scsi_sn of k2)
for the volume.
:param attach_to: It is a hostname of node which is returned
by "compute_instance_id" method.
:raises UnknownVolume: If the supplied "blockdevice_id" does not
exist.
:returns: A "BlockDeviceVolume" with a "attached_to" attribute set
to "attach_to".
"""
LOG.info('attaching to blockdevice_id %s and host is %s',
blockdevice_id, attach_to)
# Searching for volume by scsi_sn via krest
volume = self.krest.search("volumes", scsi_sn=blockdevice_id)
if volume.total == 0:
raise blockdevice.UnknownVolume(blockdevice_id)
# Check for host which is associate with iqn(iSCSI Qualified Name)
iqn = self.api_client.get_initiator_name()
host_iqns = self.krest.search("host_iqns", iqn=iqn)
host = self.api_client.rgetattr(host_iqns.hits[0], "host", None) \
if host_iqns.total > 0 else None
        # if the IQN is not associated with any host
        if not host:
            # search for the node host returned by
            # the compute_instance_id method.
            host = self.krest.search("hosts", name=attach_to)
            if host.total > 0:
                raise InvalidDataException(
                    'Existing host is not mapped to this IQN')
            else:
                host = self._create_new_host(attach_to)
                if host_iqns.total > 0:
                    self._map_host_with_iqn(host_iqns.hits[0], host)
                else:
                    # no host_iqns record exists yet; create one for this IQN
                    # (assumes krest can create "host_iqns" the same way this
                    # file creates "hosts" and "mappings")
                    self._map_host_with_iqn(
                        self.krest.new("host_iqns", iqn=iqn), host)
# Make sure the server is logged in to the array
ips = self.krest.search("system/net_ips")
for ip in ips.hits:
self.api_client.iscsi_login(
self.api_client.rgetattr(ip, 'ip_address', None), 3260)
# Make sure we were able to find host
if not host:
            raise InvalidDataException('Host does not exist')
volume = volume.hits[0]
# First check if we are already mapped
mapped = self.krest.search('mappings', volume=volume)
if mapped.total > 0:
# Get the mapped host
mapped_host = self.api_client.rgetattr(mapped.hits[0], "host", None)
if mapped_host != host:
LOG.info("Mapped server %s", mapped_host)
# raise exception for attached volume
raise blockdevice.AlreadyAttachedVolume(blockdevice_id)
        # Make sure the host is not associated with a host group
        # Note: host groups are currently not supported.
try:
mapping = self.krest.new("mappings", volume=volume, host=host)
mapping.save()
LOG.info("Mapping is done- %s", mapping)
except Exception:
raise StorageDriverAPIException(
'Unable to map volume to server.')
# start iscsi rescan
self._iscsi_rescan('attach')
return self._return_to_block_device_volume(volume, attach_to)
def detach_volume(self, blockdevice_id):
"""Detach ``blockdevice_id`` from whatever host it is attached to.
:param unicode blockdevice_id: The unique identifier for the block
device being detached.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to anything.
:returns: ``None``
"""
LOG.info('Detaching %s', blockdevice_id)
# Check for volume by block device id(scsi_sn)
volume = self.krest.search("volumes", scsi_sn=blockdevice_id)
if volume.total == 0:
raise blockdevice.UnknownVolume(blockdevice_id)
# First check if we are mapped.
mapped = self.krest.search("mappings", volume=volume)
if mapped.total == 0:
raise blockdevice.UnattachedVolume(blockdevice_id)
# executing sync cmd for synchronize data on disk with memory
self.api_client.sync_device()
paths = self.api_client.find_paths(blockdevice_id)
for path in paths:
if "/dev/mapper/" in path:
self.api_client.remove_multipath(path)
break
        # Make sure the IQN is mapped to a host.
        iqn = self.api_client.get_initiator_name()
        host_iqns = self.krest.search("host_iqns", iqn=iqn)
        iqn_host = None
        if host_iqns.total > 0:
            # find the host associated with this IQN.
            iqn_host = self.api_client.rgetattr(
                host_iqns.hits[0], "host", None)
        # Get the mapped host
        host = self.api_client.rgetattr(mapped.hits[0], "host", None)
        # Make sure a host exists for the volume
        if not host:
            raise StorageDriverAPIException('Unable to locate server.')
        # Only unmap when the mapped host is the one owning our IQN
        if iqn_host is not None and host.name == iqn_host.name:
            mapped.hits[0].delete()
            LOG.info("Removed mapped host %s", host.name)
if self.destroy_host:
try:
host.delete()
except Exception as e:
LOG.exception("Unable to delete host due to %s", e.message)
pass
# start iscsi rescan
self._iscsi_rescan('detach')
return None
def destroy_volume(self, blockdevice_id):
"""Destroy an existing volume from an initiator (host).
:param blockdevice_id: The volume unique ID.
"""
LOG.info('Destroying volume %s', blockdevice_id)
try:
volume = self.krest.search("volumes", scsi_sn=blockdevice_id)
if volume.total == 0:
raise blockdevice.UnknownVolume(blockdevice_id)
volume = volume.hits[0]
volume_group = self.api_client.rgetattr(
volume, "volume_group", None)
volume.delete()
volume_group.delete()
except Exception:
raise StorageDriverAPIException(
'Error destroying volume blockdevice_id:{}'.format(
blockdevice_id))
return None
def list_volumes(self):
"""List all the block devices available via the back end API.
:returns: A ``list`` of ``BlockDeviceVolume``s.
"""
LOG.info('Listing volumes')
volumes = []
vols = self.krest.search("volumes")
mappings = self.krest.search('mappings')
        # we remove the CTRL volume from the result set, just to pass
        # functional test cases
        # NOTE: the CTRL volume breaks the functional test cases; deleting
        # while enumerating would skip entries, so filter instead
        vols.hits = [vol for vol in vols.hits if vol.name != "CTRL"]
# Now convert our API objects to Flocker ones
for vol in vols:
attached_to = None
mapped = self.api_client.advance_search(mappings,
volume__name=vol.name)
if len(mapped) > 0:
attached_to = self.api_client.rgetattr(
mapped[0], "host.name", None)
volumes.append(
self._return_to_block_device_volume(vol, attached_to))
return volumes
def get_device_path(self, blockdevice_id):
"""Return the device path.
Returns the local device path that has been allocated to the block
device on the host to which it is currently attached.
:param unicode blockdevice_id: The unique identifier for the block
device.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to a host.
:returns: A ``FilePath`` for the device.
"""
# Check for volume
volume = self.krest.search("volumes", scsi_sn=blockdevice_id)
if volume.total == 0:
raise blockdevice.UnknownVolume(blockdevice_id)
volume = volume.hits[0]
# Check for volume is mapped or not
# NOTE: The assumption right now is if we are mapped,
# we are mapped to the instance host.
mapped = self.krest.search("mappings", volume=volume)
if mapped.total == 0:
# if not mapped raise exception
raise blockdevice.UnattachedVolume(blockdevice_id)
# Get devices path
paths = self.api_client.find_paths(blockdevice_id)
if paths:
# return the first path
LOG.info('%s path', paths[0])
return filepath.FilePath(paths[0])
return None
|
udf.py | """Fonduer UDF."""
import logging
from multiprocessing import Manager, Process
from queue import Queue
from threading import Thread
from typing import Any, Collection, Dict, List, Optional, Set, Type, Union
from sqlalchemy import inspect
from sqlalchemy.orm import Session, scoped_session, sessionmaker
from fonduer.meta import Meta, new_sessionmaker
from fonduer.parser.models.document import Document
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
from tqdm.notebook import tqdm
logger = logging.getLogger(__name__)
class UDFRunner(object):
"""Class to run UDFs in parallel using simple queue-based multiprocessing setup."""
def __init__(
self,
session: Session,
udf_class: Type["UDF"],
parallelism: int = 1,
**udf_init_kwargs: Any,
) -> None:
"""Initialize UDFRunner."""
self.udf_class = udf_class
self.udf_init_kwargs = udf_init_kwargs
self.udfs: List["UDF"] = []
self.pb = None
self.session = session
self.parallelism = parallelism
#: The last set of documents that apply() was called on
self.last_docs: Set[str] = set()
def apply(
self,
doc_loader: Collection[
Document
], # doc_loader has __len__, but Iterable doesn't.
clear: bool = True,
parallelism: Optional[int] = None,
progress_bar: bool = True,
**kwargs: Any,
) -> None:
"""Apply the given UDF to the set of objects returned by the doc_loader.
Either single or multi-threaded, and optionally calling clear() first.
"""
# Clear everything downstream of this UDF if requested
if clear:
self.clear(**kwargs)
# Execute the UDF
logger.info("Running UDF...")
# Setup progress bar
if progress_bar:
logger.debug("Setting up progress bar...")
if hasattr(doc_loader, "__len__"):
self.pb = tqdm(total=len(doc_loader))
else:
logger.error("Could not determine size of progress bar")
# Use the parallelism of the class if none is provided to apply
parallelism = parallelism if parallelism else self.parallelism
self._apply(doc_loader, parallelism, clear=clear, **kwargs)
# Close progress bar
if self.pb is not None:
logger.debug("Closing progress bar...")
self.pb.close()
logger.debug("Running after_apply...")
self._after_apply(**kwargs)
def clear(self, **kwargs: Any) -> None:
"""Clear the associated data from the database."""
raise NotImplementedError()
def _after_apply(self, **kwargs: Any) -> None:
"""Execute this method by a single process after apply."""
pass
def _add(self, session: Session, instance: Any) -> None:
pass
def _apply(
self, doc_loader: Collection[Document], parallelism: int, **kwargs: Any
) -> None:
"""Run the UDF multi-threaded using python multiprocessing."""
if not Meta.postgres:
raise ValueError("Fonduer must use PostgreSQL as a database backend.")
# Create an input queue to feed documents to UDF workers
manager = Manager()
# Set maxsize (#435). The number is heuristically determined.
in_queue = manager.Queue(maxsize=parallelism * 2)
# Use an output queue to track multiprocess progress
out_queue = manager.Queue()
# Clear the last documents parsed by the last run
self.last_docs = set()
# Create DB session factory for insert data on each UDF (#545)
session_factory = new_sessionmaker()
# Create UDF Processes
for i in range(parallelism):
udf = self.udf_class(
session_factory=session_factory,
runner=self,
in_queue=in_queue,
out_queue=out_queue,
worker_id=i,
**self.udf_init_kwargs,
)
udf.apply_kwargs = kwargs
self.udfs.append(udf)
# Start the UDF processes
for udf in self.udfs:
udf.start()
# Fill input queue with documents but # of docs in queue is capped (#435).
def in_thread_func() -> None:
# Do not use session here to prevent concurrent use (#482).
for doc in doc_loader:
in_queue.put(doc) # block until a free slot is available
Thread(target=in_thread_func).start()
count_parsed = 0
total_count = len(doc_loader)
while (
any([udf.is_alive() for udf in self.udfs]) or not out_queue.empty()
) and count_parsed < total_count:
# Get doc from the out_queue and persist the result into postgres
try:
doc_name = out_queue.get() # block until an item is available
self.last_docs.add(doc_name)
# Update progress bar whenever an item has been processed
count_parsed += 1
if self.pb is not None:
self.pb.update(1)
except Exception as e:
# Raise an error for all the other exceptions.
                raise e
# Join the UDF processes
for _ in self.udfs:
in_queue.put(UDF.TASK_DONE)
for udf in self.udfs:
udf.join()
# Flush the processes
self.udfs = []
class UDF(Process):
"""UDF class."""
TASK_DONE = "done"
def __init__(
self,
session_factory: sessionmaker = None,
runner: UDFRunner = None,
in_queue: Optional[Queue] = None,
out_queue: Optional[Queue] = None,
worker_id: int = 0,
**udf_init_kwargs: Any,
) -> None:
"""Initialize UDF.
:param in_queue: A Queue of input objects to processes
:param out_queue: A Queue of output objects from processes
:param worker_id: An ID of a process
"""
super().__init__()
self.daemon = True
self.session_factory = session_factory
self.runner = runner
self.in_queue = in_queue
self.out_queue = out_queue
self.worker_id = worker_id
# We use a workaround to pass in the apply kwargs
self.apply_kwargs: Dict[str, Any] = {}
def run(self) -> None:
"""Run function of UDF.
Call this method when the UDF is run as a Process in a
        multiprocess setting. The basic routine is: get from JoinableQueue,
        apply, put / add outputs, loop.
"""
        # Each UDF gets a thread-local (scoped) session from the connection pool.
        # See SQLAlchemy docs on using scoped sessions with multiprocessing.
Session = scoped_session(self.session_factory)
session = Session()
while True:
doc = self.in_queue.get() # block until an item is available
if doc == UDF.TASK_DONE:
break
# Merge the object with the session owned by the current child process.
# This does not happen during parsing when doc is transient.
if not inspect(doc).transient:
doc = session.merge(doc, load=False)
y = self.apply(doc, **self.apply_kwargs)
self.runner._add(session, y)
self.out_queue.put(doc.name)
session.commit()
session.close()
Session.remove()
def apply(
self, doc: Document, **kwargs: Any
) -> Union[Document, None, List[List[Dict[str, Any]]]]:
"""Apply function.
This function takes in an object, and returns a generator / set / list.
"""
raise NotImplementedError()
|
multimedia.py | #!/usr/bin/env python3
# Author: Hernández Albino Edgar Alejandro
#         Maceda Nazario Luis Martín
# Libraries used
import pygame
import threading
import tkinter as tk
from tkinter import *
import webbrowser
import vlc
import os
# Add the heading to each section
def title(frame, message):
    label = tk.Label(frame, text=message, fg="#E5E8E8", bg="#282A36", font=("Bebas Kai", 30))
    label.pack()
# Add a service image to a section
def service(frame, img):
    service = tk.Label(frame, image=img)
    service.config(bg="#282A36")
    service.pack(pady=15, padx=15)
# Create frame divisions for the window
def frame(parent, wid, relX=0):
    divisor = tk.Frame(parent)
    divisor.place(relx=relX, relwidth=wid, relheight=0.9)
    divisor.config(bg="#282A36")
    return divisor
# Create a button using the parent frame and an image
def button(frame, img):
    press = tk.Button(frame, image=img, bg="#282A36", activebackground="#F325C1")
    press.pack(padx=10, pady=10)
    return press
# Open the web address of a service
def web(url):
    webbrowser.open(url)
# Check that files on the USB drive match one of the allowed extensions
def buscar(a,fichero):
    for i in range(len(a)):
        if fichero.endswith(a[i]): # True if the file ends with an extension from the list a
            return True
    return False
# List the files at the USB mount point
# If no USB drive is present, an empty list is returned
def encontrar(entrada):
    contenido2=os.listdir('/media/pi/')
    if len(contenido2)==0:
        return contenido2
    # If a USB drive is connected, join the mount folder with the drive name
    contenido=os.listdir('/media/pi/'+contenido2[0])
    arreglo=[]
    # Keep only the files whose extension is allowed
    for fichero in contenido:
        # On each iteration, check that path + entry is a regular file with an allowed extension
        if os.path.isfile(os.path.join('/media/pi/'+contenido2[0],fichero)) and buscar(entrada,fichero):
            arreglo.append(fichero)
    return arreglo
# Window used to choose which media type to play
class Ventana2:
    def __init__(self,maestro,elec):
        self.maestro=maestro
        maestro.title("USB Drive Interface")
        a=[".mp3"] # valid music extensions
        arr=encontrar(a)
        b=[".mp4"] # valid video extensions
        arr2=encontrar(b)
        c=[".jpg",".png",".jfif"] # valid image extensions
        arr3=encontrar(c)
        # If the drive is empty or missing, show a message saying so
        if(len(arr)==0 and len(arr2)==0 and len(arr3)==0):
            self.lab=Label(maestro,text="No content found") # label shown when there is nothing to play
            self.lab.pack()
        # Menu buttons
        self.salir=Button(maestro,text="#EXIT#",command=self.salir)
        self.salir.pack()
        if(len(arr)!=0):
            self.boton=Button(maestro,text="■ MUSIC ■",command=self.youtube)
            self.boton.pack()
        # the image list (arr3) gates the images button and the video
        # list (arr2) gates the video button
        if(len(arr3)!=0):
            self.boton=Button(maestro,text="■ IMAGES ■",command=self.spt)
            self.boton.pack()
        if(len(arr2)!=0):
            self.boton=Button(maestro,text="■ VIDEO ■",command=self.memoria)
            self.boton.pack()
    # Each button callback records the choice and closes the menu
    def youtube(self):
        self.elec=1
        self.maestro.destroy()
    def spt(self):
        self.elec=2
        self.maestro.destroy()
    def memoria(self):
        self.elec=3
        self.maestro.destroy()
    def regresa(self):
        return self.elec
    def salir(self):
        self.elec=5
        self.maestro.destroy()
# Audio player window
class Ventana3:
    def __init__(self,maestro,n):
        self.maestro=maestro
        self.i=0
        self.aux=True
        self.ban=True
        self.n=n
        maestro.title("Audio Player")
        pygame.mixer.music.load(self.n[self.i]) # load the audio file
        pygame.mixer.music.play() # start playing the loaded file
        # Player buttons
        self.boton=Button(maestro,text="▌▐",command=self.youtube)
        self.boton.pack()
        self.boton1=Button(maestro,text=">>>",command=self.spt)
        self.boton1.pack()
        self.boton2=Button(maestro,text="<<<",command=self.retro)
        self.boton2.pack()
        self.boton3=Button(maestro,text="#EXIT#",command=self.memoria)
        self.boton3.pack()
    # Pause/play toggle
    def youtube(self):
        if(self.ban==True):
            pygame.mixer.music.pause()
            self.ban=False
        else:
            pygame.mixer.music.unpause()
            self.ban=True
    # Load the next track in the playlist
    def spt(self):
        self.i=self.i+1
        # If there are more files, play the next one
        if(self.i<len(self.n)):
            pygame.mixer.music.stop()
            pygame.mixer.music.load(self.n[self.i])
            pygame.mixer.music.play()
        # If it was the last file, restart from the beginning
        else:
            self.i=0
            pygame.mixer.music.stop()
            pygame.mixer.music.load(self.n[self.i])
            pygame.mixer.music.play()
    # Step back in the playlist
    def retro(self):
        self.i=self.i-1
        # Play the previous file
        if(self.i>=0):
            pygame.mixer.music.stop()
            pygame.mixer.music.load(self.n[self.i])
            pygame.mixer.music.play()
        # If we step before the first file, wrap around to the last one
        else:
            self.i=len(self.n)-1
            pygame.mixer.music.stop()
            pygame.mixer.music.load(self.n[self.i])
            pygame.mixer.music.play()
    # Leave the player
    def memoria(self):
        self.maestro.destroy()
        self.aux=False
        pygame.mixer.music.stop()
    def devolver(self):
        return self.aux
    # Accessors used by the incrementar() helper thread below; they were
    # referenced there but missing from the class
    def devolveri(self):
        return self.i
    def poneri(self, a):
        self.i = a
# Image viewer window
class Ventana4:
    def __init__(self,maestro,n):
        self.maestro=maestro
        self.i=0
        self.n=n
        maestro.title("Image Viewer")
        self.media=vlc.MediaPlayer(self.n[self.i])
        self.media.play() # show the first image right away
        # After the first image is shown, create the buttons
        self.boton1=Button(maestro,text=">>>",command=self.spt)
        self.boton1.pack()
        self.boton2=Button(maestro,text="<<<",command=self.retro)
        self.boton2.pack()
        self.boton3=Button(maestro,text="#EXIT#",command=self.memoria)
        self.boton3.pack()
    # Advance to the next image in the list
    def spt(self):
        self.media.stop()
        self.i=self.i+1
        # Load the next file while i is still inside the list
        if(self.i<len(self.n)):
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
        # Restart the list after the last item
        else:
            self.i=0
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
    # Go back to the previous image in the list
    def retro(self):
        self.i=self.i-1
        self.media.stop()
        if(self.i>=0):
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
        # If we step before the first image, wrap around to the last one
        else:
            self.i=len(self.n)-1
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
    # Close the image viewer
    def memoria(self):
        self.maestro.destroy()
        self.media.stop()
# Video player window
class Ventana5:
    def __init__(self,maestro,n):
        self.maestro=maestro
        self.i=0
        self.aux=True
        self.ban=True
        self.n=n
        maestro.title("Video Player")
        self.media=vlc.MediaPlayer(self.n[self.i])
        self.media.play()
        # Playback starts as the window opens; then the buttons are created
        self.boton1=Button(maestro,text=">>>",command=self.spt)
        self.boton1.pack()
        self.boton2=Button(maestro,text="<<<",command=self.retro)
        self.boton2.pack()
        self.boton4=Button(maestro,text="▌▌",command=self.pausa)
        self.boton4.pack()
        self.boton3=Button(maestro,text="#EXIT#",command=self.memoria)
        self.boton3.pack()
    # Pause button handler
    def pausa(self):
        if(self.ban==True):
            self.media.set_pause(1)
            self.ban=False
        else:
            self.media.play()
            self.ban=True
    # Load the next video in the list
    def spt(self):
        self.media.stop()
        self.i=self.i+1
        # Play the next video
        if(self.i<len(self.n)):
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
        # Restart the list
        else:
            self.i=0
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
    # Step back in the list to play the previous video
    def retro(self):
        self.i=self.i-1
        self.media.stop()
        # If there is a previous video, play it
        if(self.i>=0):
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
        # Otherwise wrap around to the end of the list
        else:
            self.i=len(self.n)-1
            self.media=vlc.MediaPlayer(self.n[self.i])
            self.media.play()
    def memoria(self):
        self.maestro.destroy()
        self.media.stop()
# Se crea la lista de archivos multimedia leídos
def concatena(n):
for i in range(len(n)):
contenido=os.listdir('/media/pi/')
n[i]="/media/pi/"+contenido[0]+"/"+n[i]
return n
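# Illustrative sketch (hypothetical volume label "USB"): with a drive mounted at
# /media/pi/USB, concatena(["song.mp3"]) returns ["/media/pi/USB/song.mp3"].
# Only the first entry listed under /media/pi/ is used as the mount point.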
# A thread is created so that loading of the next file can start even while the
# current window is inside its event loop
def incrementar(num,**datos):
while (datos['inicio'].devolver()):
if(pygame.mixer.music.get_busy()==0):
a=datos['inicio'].devolveri()
# Check that i is not at the end of the list
if(a==len(datos['arreglo'])-1):
a=-1
a=a+1
# Store the value of i on the window object so that it can be
# used by the button callback methods
datos['inicio'].poneri(a)
pygame.mixer.music.load(datos['arreglo'][a])
pygame.mixer.music.play()
# When an audio file ends, the next one starts automatically
pygame.mixer.music.stop()
pygame.mixer.quit()
# Creates a music player instance
def audio(a):
ent=pygame.mixer.init()
a=[".mp3"]
arr=encontrar(a)
root=Tk()
a=0
arr=concatena(arr)
vent=Ventana3(root,arr)
# Create a thread that plays the next track when the current one finishes
inicia=threading.Thread(target=incrementar,args=(2,),kwargs={'inicio':vent,'arreglo':arr})
inicia.start()
root.mainloop()
# Creates a window and filters for the allowed image file types
def imagenes(a):
a=['.jpg','.png','.jfif']
arr=encontrar(a)
root=Tk()
a=0
arr=concatena(arr) # Get the list with the full paths of the image files
vent=Ventana4(root,arr)
root.mainloop()
# Creates a window and filters for the allowed video file types
def videos(a):
a=['.mp4']
arr=encontrar(a)
root=Tk()
a=0
arr=concatena(arr)
vent=Ventana5(root,arr)
root.mainloop()
# Main function
def main():
flag=True
while(flag):
# Create the window
raiz = tk.Tk()
raiz.title("Centro Multimedia")
raiz.resizable(width=0, height=0)
raiz.config(bg="#282A36")
raiz.geometry("850x800")
raiz.tk.call('wm', 'iconphoto', raiz._w, tk.PhotoImage(file='/home/pi/Downloads/MultimediaCenter/img/multimedia.png'))
# Create the main frame
mainFrame = tk.Frame(raiz)
mainFrame.pack(fill="both", expand="True")
mainFrame.config(bg="#282A36")
# ---------- MUSIC FRAME ----------
musicFrame = frame(mainFrame, 0.4)
title(musicFrame, "MÚSICA")
# Add the images for the buttons
spotifyImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/spotify.png")
appleImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/apple.png")
deezerImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/deezer.png")
ymusicImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/ymusic.png")
# Create the section's buttons
spotify = button(musicFrame, spotifyImg)
apple = button(musicFrame, appleImg)
deezer = button(musicFrame, deezerImg)
ymusic = button(musicFrame, ymusicImg)
# Attach the callbacks to the buttons
spotify.config(command=lambda: web('https://open.spotify.com'))
apple.config(command=lambda: web('https://music.apple.com/mx/browse'))
deezer.config(command=lambda: web('https://www.deezer.com/mx/'))
ymusic.config(command=lambda: web('https://music.youtube.com'))
# ---------- VIDEO FRAME ----------
videoFrame = frame(mainFrame, 0.4, 0.4)
title(videoFrame, "VIDEO")
# Add the images for the buttons
youtubeImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/youtube.png")
netflixImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/netflix.png")
primeImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/prime-video.png")
hboImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/hbo.png")
disneyImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/disney.png")
crunchyImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/crunchy.png")
# Create the section's buttons
youtube = button(videoFrame, youtubeImg)
netflix = button(videoFrame, netflixImg)
prime = button(videoFrame, primeImg)
hbo = button(videoFrame, hboImg)
disney = button(videoFrame, disneyImg)
crunchy = button(videoFrame, crunchyImg)
# Attach the callbacks to the buttons
youtube.config(command=lambda: web('https://www.youtube.com'))
netflix.config(command=lambda: web('https://www.netflix.com/mx-en/login'))
prime.config(command=lambda: web('https://www.primevideo.com'))
hbo.config(command=lambda: web('https://www.hbolatam.com/us/account/login'))
disney.config(command=lambda: web('https://www.disneyplus.com/login'))
crunchy.config(command=lambda: web('https://www.crunchyroll.com/es/welcome/login'))
# ---------- MULTIMEDIA FRAME ----------
multiFrame = frame(mainFrame, 0.2, 0.8)
title(multiFrame, "MULTIMEDIA")
# Add the images for the buttons
musicImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/music.png")
videoImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/video.png")
imagenImg = tk.PhotoImage(file="/home/pi/Downloads/MultimediaCenter/img/imagen.png")
# Create the section's buttons
music = button(multiFrame, musicImg)
video = button(multiFrame, videoImg)
imagen = button(multiFrame, imagenImg)
# Attach the callbacks to the buttons
music.config(command=lambda: audio(a))
video.config(command=lambda: videos(a))
imagen.config(command=lambda: imagenes(a))
# Create variables for multimedia playback
a=0
r=[".mp3"]
arr=encontrar(r)
b=[".mp4"]
arr2=encontrar(b)
c=[".jpg",".png",".jfif"]
arr3=encontrar(c)
if len(arr)!=0 and len(arr2)==0 and len(arr3)==0:
a=1
elif len(arr)==0 and len(arr2)!=0 and len(arr3)==0:
a=3
elif len(arr)==0 and len(arr2)==0 and len(arr3)!=0:
a=2
# Start the Tk event loop so the window is displayed and the button callbacks run
raiz.mainloop()
# Run the main function
if __name__ == "__main__":
main() |
__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016, ParaTools, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# (3) Neither the name of ParaTools, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Unit test initializations and utility functions."""
import os
import sys
import glob
import shutil
import atexit
import tempfile
import unittest
from unittest import skipIf, skipUnless
from taucmdr.util import get_command_output
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from taucmdr import logger, TAUCMDR_HOME, EXIT_SUCCESS, EXIT_FAILURE
from taucmdr.error import ConfigurationError
from taucmdr.cf.compiler import InstalledCompiler
from taucmdr.cf.storage.levels import PROJECT_STORAGE, USER_STORAGE, SYSTEM_STORAGE
_DIR_STACK = []
_CWD_STACK = []
_TEMPDIR_STACK = []
_NOT_IMPLEMENTED = []
def _destroy_test_workdir(path):
onerror = lambda f, p, e: sys.stderr.write("\nERROR: Failed to clean up testing directory %s\n" % p)
shutil.rmtree(path, ignore_errors=False, onerror=onerror)
def push_test_workdir():
"""Create a new working directory for a unit test.
Sets the current working directory and :any:`tempfile.tempdir` to the newly created test directory.
Directories created via this method are tracked. If any of them exist when the program exits then
an error message is shown for each.
"""
path = tempfile.mkdtemp()
try:
test_src = os.path.join(TAUCMDR_HOME, '.testfiles', 'foo_launcher')
test_dst = os.path.join(path, 'foo_launcher')
shutil.copy(test_src, test_dst)
get_command_output('%s/foo_launcher' %path)
except OSError:
shutil.rmtree(path)
path = tempfile.mkdtemp(dir=os.getcwd())
_DIR_STACK.append(path)
_CWD_STACK.append(os.getcwd())
_TEMPDIR_STACK.append(tempfile.tempdir)
os.chdir(path)
tempfile.tempdir = path
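# Usage sketch (illustrative): push/pop are intended to be paired, e.g.
#
# push_test_workdir()
# try:
#     pass  # exercise code that writes to the current working directory
# finally:
#     pop_test_workdir()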
def pop_test_workdir():
"""Recursively deletes the most recently created unit test working directory.
Restores the current working directory and :any:`tempfile.tempdir` to their values before
:any:`push_test_workdir` was called.
"""
tempfile.tempdir = _TEMPDIR_STACK.pop()
os.chdir(_CWD_STACK.pop())
_destroy_test_workdir(_DIR_STACK.pop())
def get_test_workdir():
"""Return the current unit test's working directory."""
return _DIR_STACK[0]
def cleanup():
"""Checks that any files or directories created during testing have been removed."""
if _DIR_STACK:
for path in _DIR_STACK:
sys.stderr.write("\nWARNING: Test directory '%s' still exists, attempting to clean now...\n" % path)
_destroy_test_workdir(path)
atexit.register(cleanup)
def not_implemented(cls):
"""Decorator for TestCase classes to indicate that the tests have not been written (yet)."""
msg = "%s: tests have not been implemented" % cls.__name__
_NOT_IMPLEMENTED.append(msg)
return unittest.skip(msg)(cls)
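# Usage sketch (illustrative; FooTests is a hypothetical test class):
#
# @not_implemented
# class FooTests(TestCase):
#     """Tests for foo (not written yet)."""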
def _null_decorator(_):
return _
def skipUnlessHaveCompiler(role):
"""Decorator to skip test functions when no compiler fills the given role.
If no installed compiler can fill this role then skip the test and report "<role> compiler not found".
Args:
role (_CompilerRole): A compiler role.
"""
# pylint: disable=invalid-name
try:
InstalledCompiler.find_any(role)
except ConfigurationError:
return unittest.skip("%s compiler not found" % role)
return _null_decorator
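# Usage sketch (illustrative; CC_ROLE stands in for a real compiler role object):
#
# @skipUnlessHaveCompiler(CC_ROLE)
# def test_build_hello(self):
#     ...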
class TestCase(unittest.TestCase):
"""Base class for unit tests.
Performs tests in a temporary directory and reconfigures :any:`taucmdr.logger` to work with :any:`unittest`.
"""
# Follow the :any:`unittest` code style.
# pylint: disable=invalid-name
@classmethod
def setUpClass(cls):
push_test_workdir()
# Reset stdout logger handler to use buffered unittest stdout
# pylint: disable=protected-access
cls._orig_stream = logger._STDOUT_HANDLER.stream
logger._STDOUT_HANDLER.stream = sys.stdout
@classmethod
def tearDownClass(cls):
PROJECT_STORAGE.destroy(ignore_errors=True)
# Reset stdout logger handler to use original stdout
# pylint: disable=protected-access
logger._STDOUT_HANDLER.stream = cls._orig_stream
pop_test_workdir()
def run(self, result=None):
# Whenever running a test, set the terminal size large enough to avoid any regex failures due to line wrap
logger.TERM_SIZE=(150,150)
logger.LINE_WIDTH=logger.TERM_SIZE[0]
logger._STDOUT_HANDLER.setFormatter(logger.LogFormatter(line_width=logger.LINE_WIDTH, printable_only=True))
# Nasty hack to give us access to what sys.stderr becomes when unittest.TestRunner.buffered == True
# pylint: disable=attribute-defined-outside-init
assert result is not None
self._result_stream = result.stream
return super(TestCase, self).run(result)
def reset_project_storage(self, init_args=None):
"""Delete and recreate project storage.
Effectively the same as::
> rm -rf .tau
> tau initialize [init_args]
Args:
init_args (list): Command line arguments to `tau initialize`.
"""
from taucmdr.cli.commands.initialize import COMMAND as initialize_cmd
PROJECT_STORAGE.destroy(ignore_errors=True)
argv = ['--project-name', 'proj1', '--target-name', 'targ1', '--application-name', 'app1', '--tau', 'nightly']
if init_args is not None:
argv.extend(init_args)
if '--bare' in argv or os.path.exists(os.path.join(SYSTEM_STORAGE.prefix, 'tau')):
initialize_cmd.main(argv)
else:
# If this is the first time setting up TAU and dependencies then we need to emit output so
# CI drivers like Travis don't think our unit tests have stalled.
import time
import threading
def worker():
initialize_cmd.main(argv)
thread = threading.Thread(target=worker)
tstart = time.time()
thread.start()
self._result_stream.write("\nInitializing TAU and dependencies:\n")
self._result_stream.write(" @SYSTEM='%s'\n" % SYSTEM_STORAGE.prefix)
self._result_stream.write(" @USER='%s'\n" % USER_STORAGE.prefix)
while thread.is_alive():
time.sleep(5)
self._result_stream.write('.')
elapsed = time.time() - tstart
self._result_stream.writeln('\nTAU initialized in %s seconds' % elapsed)
def destroy_project_storage(self):
"""Delete project storage.
Effectively the same as::
> rm -rf .tau
"""
PROJECT_STORAGE.destroy(ignore_errors=True)
def exec_command(self, cmd, argv):
"""Execute a tau command's main() routine and return the exit code, stdout, and stderr data.
Args:
cmd (type): A command instance that has a `main` callable attribute.
argv (list): Arguments to pass to cmd.main()
Returns:
tuple: (retval, stdout, stderr) results of running the command.
"""
# pylint: disable=protected-access
stdout = StringIO()
stderr = StringIO()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stdout
sys.stderr = stderr
logger._STDOUT_HANDLER.stream = stdout
try:
retval = cmd.main(argv)
except SystemExit as err:
retval = err.code
stdout_value = stdout.getvalue()
stderr_value = stderr.getvalue()
orig_stdout.write(stdout_value)
orig_stderr.write(stderr_value)
return retval, stdout_value, stderr_value
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
logger._STDOUT_HANDLER.stream = orig_stdout
def copy_testfile(self, src, dst=None):
test_src = os.path.join(TAUCMDR_HOME, '.testfiles', src)
test_dst = os.path.join(get_test_workdir(), dst or src)
shutil.copy(test_src, test_dst)
def assertCompiler(self, role, target_name='targ1'):
from taucmdr.model.target import Target
targ_ctrl = Target.controller(PROJECT_STORAGE)
targ = targ_ctrl.one({'name': target_name})
try:
return targ.populate(role.keyword)['path']
except KeyError:
self.fail("No %s compiler in target '%s'" % (role, target_name))
def assertCommandReturnValue(self, return_value, cmd, argv):
retval, stdout, stderr = self.exec_command(cmd, argv)
self.assertEqual(retval, return_value)
return stdout, stderr
def assertNotCommandReturnValue(self, return_value, cmd, argv):
retval, stdout, stderr = self.exec_command(cmd, argv)
self.assertNotEqual(retval, return_value)
return stdout, stderr
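# Usage sketch (illustrative; some_cmd is a hypothetical command object with a
# main() callable):
#
# stdout, stderr = self.assertCommandReturnValue(EXIT_SUCCESS, some_cmd, ['--help'])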
def assertManagedBuild(self, return_value, compiler_role, compiler_args, src):
from taucmdr.cli.commands.build import COMMAND as build_command
self.copy_testfile(src)
cc_cmd = self.assertCompiler(compiler_role)
args = [cc_cmd] + compiler_args + [src]
self.assertCommandReturnValue(return_value, build_command, args)
def assertInLastTrialData(self, value, data_type='tau'):
from taucmdr.model.project import Project
trial = Project.selected().experiment().trials()
data_files = trial[0].get_data_files()
if data_type == 'tau':
data = []
for profile_file in glob.glob(os.path.join(data_files['tau'], 'profile.*.*.*')):
with open(profile_file) as fin:
buff = fin.read()
if value in buff:
return
data.append(buff)
else:
raise NotImplementedError
self.fail("'%s' not found in '%s'" % (value, '\n'.join(data)))
class TestRunner(unittest.TextTestRunner):
"""Test suite runner."""
def __init__(self, *args, **kwargs):
super(TestRunner, self).__init__(*args, **kwargs)
self.buffer = True
def run(self, test):
result = super(TestRunner, self).run(test)
for item in _NOT_IMPLEMENTED:
print "WARNING: %s" % item
if result.wasSuccessful():
return EXIT_SUCCESS
return EXIT_FAILURE
|
run_server.py | import argparse
import os
from multiprocessing import Process
from common import client_ai_teaming, pairing_clients
from config import (DEFAULT_DATA_SAVE_PATH, DEFAULT_SERVER_ADDR,
DEFAULT_SERVER_PORT)
from network import Server, send
from tasks.affective_task import ServerAffectiveTask
from tasks.finger_tapping_task import ServerFingerTappingTask
from tasks.ping_pong_task import ServerPingPongTask
from tasks.rest_state import ServerRestState
REQUIRED_NUM_CONNECTIONS_REST_STATE = [1, 2, 3]
REQUIRED_NUM_CONNECTIONS_FINGER_TAPPING_TASK = [1, 2, 3]
REQUIRED_NUM_CONNECTIONS_AFFECTIVE_TASK = [1, 2, 3]
REQUIRED_NUM_CONNECTIONS_COMPETITIVE_PING_PONG_TASK = [2, 4]
REQUIRED_NUM_CONNECTIONS_COOPERATIVE_PING_PONG_TASK = [3, 4]
def _send_start(to_client_connections: list):
data = {}
data["type"] = "request"
data["request"] = "start"
send(to_client_connections, data)
def _run_ping_pong(to_client_connections: list,
from_client_connections: dict,
session_name: str,
easy_mode: bool = True,
data_save_path: str = ''):
server_ping_pong_task = ServerPingPongTask(to_client_connections,
from_client_connections,
easy_mode=easy_mode,
session_name=session_name,
data_save_path=data_save_path)
server_ping_pong_task.run()
def _run_affective_task(to_client_connections: list,
from_client_connections: dict,
session_name: str,
data_save_path: str = ''):
server_affective_task = ServerAffectiveTask(to_client_connections,
from_client_connections,
session_name=session_name,
data_save_path=data_save_path)
server_affective_task.run("./tasks/affective_task/images/task_images", collaboration=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run the server for the experiment tasks.')
parser.add_argument("-a", "--address", default=DEFAULT_SERVER_ADDR, help="IP address of server")
parser.add_argument("-p", "--port", type=int, default=DEFAULT_SERVER_PORT, help="Port of server")
parser.add_argument("-s", "--save", default=DEFAULT_DATA_SAVE_PATH, help="Specify where to save data")
args = parser.parse_args()
data_path = args.save + "/finger_tapping"
if not os.path.exists(data_path):
os.makedirs(data_path)
data_path = args.save + "/affective"
if not os.path.exists(data_path):
os.makedirs(data_path)
data_path = args.save + "/ping_pong"
if not os.path.exists(data_path):
os.makedirs(data_path)
server = Server(args.address, args.port)
# Initial rest state
server.establish_connections(REQUIRED_NUM_CONNECTIONS_REST_STATE)
_send_start(list(server.to_client_connections.values()))
server_rest_state = ServerRestState(list(server.to_client_connections.values()),
server.from_client_connections)
server_rest_state.run()
# Finger tapping task
server.establish_connections(REQUIRED_NUM_CONNECTIONS_FINGER_TAPPING_TASK)
_send_start(list(server.to_client_connections.values()))
server_finger_tapping_task = ServerFingerTappingTask(list(server.to_client_connections.values()),
server.from_client_connections,
data_save_path=args.save)
server_finger_tapping_task.run()
# Individual affective task
server.establish_connections(REQUIRED_NUM_CONNECTIONS_AFFECTIVE_TASK)
_send_start(list(server.to_client_connections.values()))
affective_task_processes = []
for from_client_connection, client_name in server.from_client_connections.items():
to_client_connection = server.to_client_connections[client_name]
session_name = "individual_" + client_name
process = Process(target=_run_affective_task,
args=([to_client_connection], {from_client_connection: client_name}, session_name, args.save))
affective_task_processes.append(process)
for process in affective_task_processes:
process.start()
for process in affective_task_processes:
process.join()
# Team affective task
server.establish_connections(REQUIRED_NUM_CONNECTIONS_AFFECTIVE_TASK)
_send_start(list(server.to_client_connections.values()))
server_affective_task = ServerAffectiveTask(list(server.to_client_connections.values()),
server.from_client_connections,
session_name="team",
data_save_path=args.save)
server_affective_task.run("./tasks/affective_task/images/task_images", collaboration=True)
server_affective_task.close_file()
# Ping pong competitive
server.establish_connections(REQUIRED_NUM_CONNECTIONS_COMPETITIVE_PING_PONG_TASK)
_send_start(list(server.to_client_connections.values()))
client_pairs = pairing_clients(server.to_client_connections, server.from_client_connections)
ping_pong_processes = []
for session_id, (to_client_connection_pair, from_client_connection_pair) in enumerate(client_pairs):
to_client_connections = []
for to_client_connection_team in to_client_connection_pair:
to_client_connections = to_client_connections + list(to_client_connection_team.values())
session_name = "competitive_" + str(session_id)
process = Process(target=_run_ping_pong, args=(to_client_connections, from_client_connection_pair, session_name, True, args.save))  # easy_mode=True; args.save fills data_save_path
ping_pong_processes.append(process)
for process in ping_pong_processes:
process.start()
for process in ping_pong_processes:
process.join()
# Ping pong cooperative
server.establish_connections(REQUIRED_NUM_CONNECTIONS_COOPERATIVE_PING_PONG_TASK)
_send_start(list(server.to_client_connections.values()))
client_pairs = client_ai_teaming(server.to_client_connections, server.from_client_connections)
ping_pong_processes = []
for session_id, (to_client_connection_teams, from_client_connection_teams) in enumerate(client_pairs):
to_client_connections = []
for to_client_connection_team in to_client_connection_teams:
to_client_connections = to_client_connections + list(to_client_connection_team.values())
session_name = "cooperative_" + str(session_id)
process = Process(target=_run_ping_pong, args=(to_client_connections, from_client_connection_teams, session_name, False, args.save))
ping_pong_processes.append(process)
for process in ping_pong_processes:
process.start()
for process in ping_pong_processes:
process.join()
server.establish_connections()
server.close_connections_listener()
|
__init__.py |
###############################################################################
# DO NOT MODIFY THIS FILE #
###############################################################################
import inspect
import logging
import sys
import time
from collections import namedtuple
from enum import Enum
from multiprocessing import Process, Queue, Pipe
from queue import Empty
from .isolation import Isolation, DebugState
__all__ = ['Isolation', 'DebugState', 'Status', 'play', 'fork_get_action']
logger = logging.getLogger(__name__)
Agent = namedtuple("Agent", "agent_class name")
PROCESS_TIMEOUT = 2 # time to interrupt agent search processes (in seconds)
GAME_INFO = """\
Initial game state: {}
First agent: {!s}
Second agent: {!s}
"""
ERR_INFO = """\
Error playing game: {!s}
Initial state: {}
First agent: {!s}
Second agent: {!s}
Final state: {}
Action history: {!s}
"""
RESULT_INFO = """\
Status: {}
Final State: {}
History: {}
Winner: {}
Loser: {}
"""
class Status(Enum):
EXCEPTION = 0
TIMEOUT = 1
INVALID_MOVE = 2
GAME_OVER = 3
class StopSearch(Exception): pass # Exception class used to halt search
class Countdown_Timer: # Timer object used to monitor time spent on search
def __init__(self, time_limit):
self.__time_limit = time_limit / 1000.
self.__stop_time = None
def set_start_time(self, start_time):
self.__stop_time = self.__time_limit + start_time
def check_time(self):
return (self.__stop_time - time.perf_counter()) * 1000.
def __call__(self):
return time.perf_counter() > self.__stop_time
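# Usage sketch (illustrative): a 150 ms budget monitored during search
#
# timer = Countdown_Timer(150)
# timer.set_start_time(time.perf_counter())
# if timer():  # True once the time limit has elapsed
#     raise StopSearch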
def play(args): return _play(*args) # multiprocessing's ThreadPool.map doesn't expand args
def _play(agents, game_state, time_limit, match_id):
""" Run a match between two agents by alternately soliciting them to
select a move and applying it to advance the game state.
Parameters
----------
agents : tuple
agents[i] is an instance of isolation.Agent class (namedtuple)
game_state: Isolation
an instance of Isolation.Isolation in the initial game state;
assumes that agents[game_state.ply_count % 2] is the active
player in the initial state
time_limit : numeric
The maximum number of milliseconds to allow before timeout during
each turn (see notes)
Returns
-------
(agent, list<[(int, int),]>, int)
A tuple containing the winning agent, the actions that were
applied to the initial state (the game history), and the match
id that was passed in
"""
initial_state = game_state
game_history = []
winner = None
players = [a.agent_class(player_id=i) for i, a in enumerate(agents)]
logger.info(GAME_INFO.format(initial_state, *agents))
while not game_state.terminal_test():
active_idx = game_state.ply_count % 2
try:
action = fork_get_action(game_state, players[active_idx], time_limit)
except Empty:
logger.info(
"{} get_action() method did not respond within {} milliseconds".format(
agents[active_idx], time_limit
))
logger.info(RESULT_INFO.format(
Status.TIMEOUT, game_state, game_history, agents[1 - active_idx], agents[active_idx]
))
winner = agents[1 - active_idx]
break
except Exception as err:
logger.error(ERR_INFO.format(
err, initial_state, agents[0], agents[1], game_state, game_history
))
winner = agents[1 - active_idx]
break
if action not in game_state.actions():
logger.info(RESULT_INFO.format(
Status.INVALID_MOVE, game_state, game_history, agents[1 - active_idx], agents[active_idx]
))
winner = agents[1 - active_idx]
break
game_state = game_state.result(action)
game_history.append(action)
if winner is not None: # Timeout, invalid move, or unknown exception
pass
elif game_state.utility(active_idx) > 0:
logger.info(RESULT_INFO.format(
Status.GAME_OVER, game_state, game_history, agents[active_idx], agents[1 - active_idx]
))
winner = agents[active_idx]
elif game_state.utility(1 - active_idx) > 0:
logger.info(RESULT_INFO.format(
Status.GAME_OVER, game_state, game_history, agents[1 - active_idx], agents[active_idx]
))
winner = agents[1 - active_idx]
else:
raise RuntimeError(("A game ended without a winner.\n" +
"initial game: {}\nfinal game: {}\naction history: {}\n").format(
initial_state, game_state, game_history))
return winner, game_history, match_id
def fork_get_action(game_state, active_player, time_limit):
action_queue = Queue()
listener, client = Pipe()
active_player.queue = action_queue # give the agent instance a threadsafe queue
# comment out these lines for debugging mode
p = Process(target=_request_action, args=(active_player, game_state, time_limit, client))
p.start()
p.join(timeout=PROCESS_TIMEOUT)
if p and p.is_alive(): p.terminate()
# Uncomment these lines to run in debug mode, which runs the search function in the
# main process so that debuggers and profilers work properly. NOTE: calls to your
# search methods will NOT timeout in debug mode; you must be very careful to avoid
# calls that are not methods of your CustomPlayer class or else your agent may fail
#
# from copy import deepcopy
# active_player.queue = None
# active_player = deepcopy(active_player)
# active_player.queue = action_queue
# _request_action(active_player, game_state, time_limit, client)
if listener.poll():
active_player.context = listener.recv() # preserve any internal state
while True: # treat the queue as LIFO
action = action_queue.get_nowait() # raises Empty if agent did not respond
if action_queue.empty(): break
return action
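# Note (descriptive): the drain loop in fork_get_action() keeps only the most
# recently queued action, i.e. the deepest search result the agent managed to
# report before its process was stopped.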
def _callable(member):
return inspect.ismethod(member) or inspect.isfunction(member)
def _timeout(func, timer):
""" Decorator to check for timeout each time a function is called """
def _func(*args, **kwargs):
if timer(): raise StopSearch
return func(*args, **kwargs)
return _func
def _wrap_timer(obj, timer):
""" Wrap each method of an object with a timeout test """
for name, method in inspect.getmembers(obj, _callable):
setattr(obj, name, _timeout(method, timer))
return obj
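# Combined-use sketch (illustrative; my_agent and state are hypothetical):
#
# timer = Countdown_Timer(time_limit)
# my_agent = _wrap_timer(my_agent, timer)
# timer.set_start_time(time.perf_counter())
# my_agent.get_action(state)  # any wrapped method raises StopSearch on timeout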
def _request_action(agent, game_state, time_limit, conn):
""" Augment agent instances with a countdown timer on every method before
calling the get_action() method and catch countdown timer exceptions.
Wrapping the methods must happen here because the wrapped methods cannot
be passed between processes
"""
timer = Countdown_Timer(time_limit)
agent = _wrap_timer(agent, timer)
timer.set_start_time(time.perf_counter())
# Catch StopSearch exceptions on timeout, but do not catch other exceptions
try:
agent.get_action(game_state)
except StopSearch:
pass
conn.send(agent.context) # pass updated agent back to calling process
|
reverseSplice.py | from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import csv
from datetime import datetime
import multiprocessing
from multiprocessing import Queue
import logging
import re
import io
import traceback
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
PROT_THRESH = 5000000
STOP = "STOP"
def generateOutput(outputPath, proteinFile, peptideFile, linFlag, cisFlag, transFlag, overlapFlag, minTransLen, maxDistance):
"""
Called from Example.createOutput() in reverseSpliceGUI.py, this function recieves the input parameters from the
GUI code, creates the protein dictionary from the input path and then calls generateOrigins to begin the
origin computation process.
:param outputPath: the path of the output file as selected by the user.
:param proteinFile: the path of the protein sequence file.
:param peptideFile: the path of the peptide sequence file.
:param linFlag: True if the user wishes to return a file with information on where each peptide could have been
generated from via linear splicing.
:param cisFlag: True if the user wishes to return a file with information on where each peptide could have been
generated from via cis splicing.
:param transFlag: True if the user wishes to return a file with information on where each peptide could have been
generated from via trans splicing.
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param minTransLen: the minimum length a cleavage must be for it to be reported in the output file as an origin
for a trans spliced peptide.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return:
"""
protDictList = protFastaToDict(proteinFile)
generateOrigins(protDictList, peptideFile, outputPath, linFlag, cisFlag, transFlag, overlapFlag, minTransLen, maxDistance)
def protFastaToDict(protFile):
"""
Called by generateOutput(), this function stores all the sequences in a fasta file in a dictionary.
:param protFile: A Fasta file path (containing all the proteins which could serve as an origin location)
:return protDictList: A list of dictionaries which contain a protein name as the key and the protein sequence as the
value. The list is created to ensure each dictionary is small enough to be passed into the pool as a global variable.
"""
protDictList = []
protDict = {}
with open(protFile, "rU") as handle:
counter = 1
for record in SeqIO.parse(handle, 'fasta'):
seq = str(record.seq)
name = 'rec' + str(counter)
protDict[name] = seq
if counter % PROT_THRESH == 0:
protDictList.append(protDict)
protDict = {}
counter+=1
if protDict:
protDictList.append(protDict)
return protDictList
def generateOrigins(protDictList, pepFile, outputPath, linFlag, cisFlag, transFlag, overlapFlag, minTransLen, maxDistance):
"""
Called by generateOutput(), this function controls the calling of a separate function for linear, cis and trans
splicing.
:param protDictList: A list of dictionaries which contain a protein name as the key and the protein sequence as the
value.
:param pepFile: A file with a list of peptides that you want to find the origin locations for
:param outputPath: Where you want to store the output file
:param linFlag: Boolean flag to determine checking for linear peptides
:param cisFlag: Boolean flag to determine checking for cis peptides
:param transFlag: Boolean flag to determine checking for trans peptides
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param minTransLen: the minimum length a cleavage must be for it to be reported in the output file as an origin
for a trans spliced peptide.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
The purpose of this function is to find the aforementioned origins based on the flags passed through and then write
the output to a Fasta file
"""
if linFlag:
# Find linear origins
findLinOrigins(protDictList, pepFile, outputPath)
if cisFlag:
# Find cis origins
findCisOrigins(protDictList, pepFile, outputPath, overlapFlag, maxDistance)
if transFlag:
# Find trans origins > work in progress
findTransOrigins(protDictList, pepFile, outputPath, minTransLen)
def findLinOrigins(protDictList, pepFile, outputPath):
"""
Called by generateOrigins(), this function takes the protDict and pepList and creates processes which compute
where within the protein list each peptide could have been generated from via linear splicing. Each process it
creates outputs a dictionary with the peptides as keys and a list of tuples containing origin peptide names and the
splitsLocation data for linear splicing as the value.
:param protDictList: A list of dictionaries which contain a protein name as the key and the protein sequence as the
value.
:param pepFile: A file containing a list of peptides that you want to find the linear origin locations for
Each worker pushes a linOriginsDict to the write queue. It has the peptide as a key and a list of tuples
of the form (originProtName, locations) as the value. Locations stores information on where the
corresponding peptide could have been generated from in the relevant origin protein.
:Data structure summary: linOriginsDict[peptide] = [(proteinName, locations),(proteinName, locations)]
"""
outputPath = outputPath + '_' + 'Linear' + '-' + datetime.now().strftime("%d%m%y_%H%M") + '.csv'
for protDict in protDictList:
numWorkers = multiprocessing.cpu_count()
toWriteQueue = multiprocessing.Queue()
pool = multiprocessing.Pool(processes=numWorkers, initializer=processLinInitArgs,
initargs=(toWriteQueue,protDict))
writerProcess = multiprocessing.Process(target=writer, args=(toWriteQueue, outputPath, 'Linear', protDict))
writerProcess.start()
# iterate through each peptide for each split up of the input
with open(pepFile, "rU") as handle:
for record in SeqIO.parse(handle, 'fasta'):
pep = str(record.seq)
#logging.info('Process started for: ' + str(pep))
pool.apply_async(linearOrigin, args=(pep,))
pool.close()
pool.join()
logging.info("Pool joined")
toWriteQueue.put(STOP)
writerProcess.join()
def linearOrigin(pep):
"""
Called as the worker function to the pool in findLinOrigins(), this function takes an individual peptide and a
dictionary of protein sequences, and returns the proteins and locations within from which the peptide could be
generated via linear splicing. Once the origin data is compiled, it is added to the toWriteQueue so that it
can be processed by the writer() function.
:param pep: the peptide which the user wishes to find potential linear splicing origins of.
The protein sequences are read from the global proteinDict (a portion, possibly all, of the input
proteins), which processLinInitArgs() sets for each worker.
:return:
"""
try:
#print(pep)
linOriginDict = {}
# initialise the key as an empty list in the outputDict
linOriginDict[pep] = []
# iterate through each protSeq in the keys of protDict
for key, value in proteinDict.items():
# initialise the locations holder
locations = []
# change the Is to Ls for both the pep and prot, as they have identical masses and
# are indistinguishable on mass spec.
alteredPep = pep.replace('I', 'L')
alteredProt = value.replace('I', 'L')
# re.finditer(substring, string) returns an iterable with the start and finish indices of all the locations
# of the substring in the string. The iterator is empty if the substring does not appear at all.
# We thus iterate through all instances of the substring and append each to a list of locations.
for x in re.finditer(alteredPep, alteredProt):
# convert the start index and end index to a list of indexes and then append to locations
# locations structure is a list of lists: [[1,2,3,4],[5,6,7,8]]
locations.append([x.start(), x.end() - 1])
# if nothing is added to locations, it means that the peptide was not found in the protein, and we continue
# iterating through proteins.
if locations:
# otherwise if we have added to locations, we append the protName/location tup to linOriginDict
linOriginDict[pep].append((key, locations))
linearOrigin.toWriteQueue.put(linOriginDict)
logging.info('Process complete for: ' + str(pep))
except Exception as e:
exc_buffer = io.StringIO()
traceback.print_exc(file=exc_buffer)
errorString = 'Uncaught exception in worker process: ' + pep + '\n%s'
logging.error(
errorString,
exc_buffer.getvalue())
raise e
def processLinInitArgs(toWriteQueue, protDict):
"""
Called from findLinOrigins() when the pool is initialised, this function gives linearOrigin() (the worker
function for each process in the pool) access to the toWriteQueue and stores protDict in a module-level
global so every worker can read the protein sequences.
"""
linearOrigin.toWriteQueue = toWriteQueue
global proteinDict
proteinDict = protDict
def findCisOrigins(protDictList, pepFile, outputPath, overlapFlag, maxDistance):
"""
Called by generateOrigins(), this function takes the protDict and pepList and creates processes which compute
where within the protein list each peptide could have been generated from via cis splicing. Each process it
creates outputs a dictionary with the peptides as keys and a list of tuples containing origin peptide names and the
splitsLocation data for cis splicing as the value.
Data structure summary: cisOriginsDict[peptide] = [(proteinName, locations),(proteinName, locations)]
:param protDictList: A list of dictionaries which contain a protein name as the key and the protein sequence as the
value.
:param pepFile: A file containing a list of peptides that you want to find the linear origin locations for.
:param outputPath: the location and name of the output file as selected by the user.
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return:
"""
outputPath = outputPath + '_' + 'Cis' + '-' + datetime.now().strftime("%d%m%y_%H%M") + '.csv'
for protDict in protDictList:
numWorkers = multiprocessing.cpu_count()
toWriteQueue = multiprocessing.Queue()
pool = multiprocessing.Pool(processes=numWorkers, initializer=processCisInitArgs,
initargs=(toWriteQueue,))
writerProcess = multiprocessing.Process(target=writer, args=(toWriteQueue, outputPath, 'Cis', protDict, (overlapFlag, maxDistance)))
writerProcess.start()
# iterate through each pep in pepList
with open(pepFile, "rU") as handle:
for record in SeqIO.parse(handle, 'fasta'):
pep = str(record.seq)
#logging.info('Cis Process started for: ' + str(pep))
pool.apply_async(cisOrigin, args=(pep, protDict, overlapFlag, maxDistance))
pool.close()
pool.join()
logging.info("Pool joined")
toWriteQueue.put(STOP)
writerProcess.join()
def cisOrigin(pep, protDict, overlapFlag, maxDistance):
"""
Called as the worker process to the pool in findCisOrigins(), this function takes an individual peptide and a
dictionary of protein sequences, and returns the proteins and locations within them from which the peptide could be
generated via cis splicing. Once the origin data is compiled, it is added to the toWriteQueue so that it
can be processed by the writer() function.
:param pep: the peptide which the user wishes to find potential cis splicing origins of.
:param protDict: a dictionary containing all the input protein sequences.
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return:
"""
try:
cisOriginDict = {}
# initialise that key in the dictionary
cisOriginDict[pep] = []
# create the altered pep:
alteredPep = pep.replace('I', 'L')
# find the splits which could be combined to create the peptide using Cis splicing.
# cisSplits is a list of tups, where each tuple contains two complimentary splits.
# cisSplits = [('A', 'BCD'),('AB', 'CD'),('ABC', 'D')]
cisSplits = findSplits(alteredPep)
# iterate through each protein in protDict.keys()
for protName, protSeq in protDict.items():
# replace all Is with Ls as they are indistinguishable on mass spec.
alteredProt = protSeq.replace('I', 'L')
# if the pep exists as a linear peptide somewhere in the input, it is not cis: report no origins and return
if alteredPep in alteredProt:
cisOriginDict[pep] = []
cisOrigin.toWriteQueue.put(cisOriginDict)
return
# find the location data corresponding to the current protSeq.
locations = findCisIndexes(cisSplits, alteredProt, overlapFlag, maxDistance)
# if no pairs of splits are located in protSeq, findCisIndexes() will return an empty list.
# If so, continue.
if locations == []:
continue
# if it is possible to create the peptide using cis splicing from the current protein, add the protName
# and locations tuple to cisOriginsDict
cisOriginDict[pep].append((protName, locations))
else:
cisOrigin.toWriteQueue.put(cisOriginDict)
logging.info('Cis Process complete for: ' + str(pep))
except Exception as e:
exc_buffer = io.StringIO()
traceback.print_exc(file=exc_buffer)
errorString = 'Uncaught exception in worker process: ' + pep + '\n%s'
logging.error(
errorString,
exc_buffer.getvalue())
raise e
def processCisInitArgs(toWriteQueue):
"""
Called from findCisOrigins() when the pool is initialised, this function simply gives cisOrigin() (the worker
function for each process in the pool) access to the toWriteQueue.
"""
cisOrigin.toWriteQueue = toWriteQueue
def findSplits(pep):
"""
Called by cisOrigins() and transOrigins(), this function takes a peptide and returns a list of tuples, where each
tuple is a possible pair of subsequences which could be combined to make the peptide.
:param pep: the input peptide. From this peptide, a list of all the pair of cleavages which could be combined to
make the peptide is returned.
:return cisSplits: a list of tuples, where each tuple is a possible pair of subsequences which could be combined
to make the peptide.
"""
cisSplits = []
lngth = len(pep)
for i in range(1,lngth):
split1 = pep[0:i]
split2 = pep[i:lngth+1]
cisSplits.append((split1,split2))
return cisSplits
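# Illustrative example: findSplits("ABCD") returns
# [('A', 'BCD'), ('AB', 'CD'), ('ABC', 'D')]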
def findCisIndexes(cisSplits, protSeq, overlapFlag, maxDistance):
"""
Called by cisOrigins(), this function returns the location data of where different pairs of cisSplits (which
combine to form a given peptide) exist in a protein sequence. The output data structure is a series of embedded
lists. The outer list contains lists relating to each cisSplit combination which has both splits found in the protSeq.
The list relating to each cisSplit combination contains two lists; a list of all the places a first split can
occur and a list of all the places the second split can occur.
Thus our final data structure looks like:
outerList[firstSplitCombo[firstSplitList[[1,2,3,4],[5,6,7,8]], secondSplitList[[9,10,11,12],[13,14,15,16]], secondSplitCombo[etc....]]
:param cisSplits: a list of all the pairs of cleavages which could be combined to form a given peptide via cis
splicing.
:param protSeq: the proteins sequence within which these pairs are being searched for. If both pairs are found,
the location data of where they were found within the protein is added to the data structure described above.
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return:
"""
totalLocations = []
# iterate through each splitTup
for splitTup in cisSplits:
splitLocations = []
# assign split1 and split 2
split1 = splitTup[0]
split2 = splitTup[1]
splitLoc1 = []
splitLoc2 = []
# append the location of any split1 occurences in protSeq to splitLoc1
for x in re.finditer(split1, protSeq):
splitLoc1.append([x.start(), x.end() - 1])
# append the location of any split2 occurences in protSeq to splitLoc2
for x in re.finditer(split2, protSeq):
splitLoc2.append([x.start(), x.end() - 1])
# if either of the splitLocs are empty, the peptide cannot be formed using this split combo from the given
# protSeq. Thus continue if either are empty.
if splitLoc1 == [] or splitLoc2 == []:
continue
# if both splits exist, we check if the length of either of the splits is 1. If so, there are likely to be heaps
# of locations where this split exists. Thus, if the length is 1 we change the location to be simply the amino
# acid instead of all the positions it is located at within the peptide.
if len(split1) == 1:
#print("checking single aminos: ")
#print(splitLoc1)
#print(splitLoc2)
# if the user has selected not to run overlap, we need to call editSingleAmino to ensure that overlap is
# satisfied when converting split1 to just hold the amino acid.
if overlapFlag:
splitLoc1, splitLoc2 = editSingleAmino(splitLoc1, splitLoc2, split1, maxDistance)
else:
splitLoc1 = split1
#print(splitLoc1)
#print(splitLoc2)
if len(split2) == 1:
# if the user has selected not to run overlap, we need to call editSingleAmino to ensure that overlap is
# satisfied when converting split2 to just hold the amino acid.
#print("checking single aminos: ")
#print(splitLoc1)
#print(splitLoc2)
if overlapFlag:
splitLoc2, splitLoc1 = editSingleAmino(splitLoc2, splitLoc1, split2, maxDistance)
else:
splitLoc2 = split2
#print(splitLoc1)
#print(splitLoc2)
# Check if either split has become empty since running the split = 1 checks.
if splitLoc1 == [] or splitLoc2 == []:
continue
# we append all the stored up location data of split1 and split2 to the splitLocations list, which
# stores data relevant only to the specific combination of splits in question. We then append
# this list to the totalLocations list which stores all the location data across all potential split combinations.
splitLocations.append(splitLoc1)
splitLocations.append(splitLoc2)
totalLocations.append(splitLocations)
return totalLocations
def editSingleAmino(splitLoc1, splitLoc2, split1, maxDistance):
"""
Called by findCisIndexes() if the user has selected not to consider overlap and split1 is only one amino acid.
This function edits splitLoc2 to include only the references which can match with split1 without overlap, and
edits splitLoc1 to be equal to split1.
:param splitLoc1: the location data of the single amino acid (split1)
:param splitLoc2: the location data of the second split which combines with split1 to create a cis spliced peptide.
:param split1: the single amino acid split being analysed.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return split1: the single amino acid split being analysed.
:return splitLoc2: the location data of the second split edited to only include the references which can match
with split1 without overlap.
"""
# we must then check, when the user has selected not to produce overlapped peptides, that at least one
# of the amino acids is found outside the locations in splitLoc1
toDelete = []
i = 0
for loc2 in splitLoc2:
# flag to check if there exists a peptide which satisfies the maxDistance and overlap criteria. Set to False if
# a satisfactory pair is found.
onlyOverlap = True
for loc1 in splitLoc1:
if not overlapCheck(loc1, loc2):
if maxDistance == 'None':
onlyOverlap = False
elif not checkMaxDistance(loc1, loc2, maxDistance):
onlyOverlap = False
# if onlyOverlap hasn't been set to False, we want to delete the split at the current index in splitLoc2.
if onlyOverlap:
toDelete.append(i)
i+=1
# reverse toDelete, iterate through and remove all splitLocs flagged for deletion.
toDelete.reverse()
for j in toDelete:
del splitLoc2[j]
return split1, splitLoc2
def findTransOrigins(protDictList, pepFile, outputPath, minTransLen):
"""
Called by generateOrigins(), this function takes the protDict and pepList and creates processes which compute
where within the protein list each peptide could have been generated from via trans splicing. Each process it
creates outputs a dictionary with the peptides as keys and a list of tuples containing origin peptide names and the
splitsLocation data for trans splicing as the value.
Data structure summary: transOriginsDict[peptide] = [(proteinName, locations),(proteinName, locations)]
:param protDictList: A list of dictionaries which contain a protein name as the key and the protein sequence as the
value.
:param pepFile: A file containing a list of peptides that you want to find the linear origin locations for
:return:
"""
outputPath = outputPath + '_' + 'Trans' + '-' + datetime.now().strftime("%d%m%y_%H%M") + '.csv'
for protDict in protDictList:
numWorkers = multiprocessing.cpu_count()
toWriteQueue = multiprocessing.Queue()
pool = multiprocessing.Pool(processes=numWorkers, initializer=processTransInitArgs,
initargs=(toWriteQueue,))
writerProcess = multiprocessing.Process(target=writer, args=(toWriteQueue, outputPath, 'Trans', protDict))
writerProcess.start()
# iterate through each peptide
with open(pepFile, "rU") as handle:
for record in SeqIO.parse(handle, 'fasta'):
pep = str(record.seq)
#logging.info('Process started for: ' + str(pep))
pool.apply_async(transOrigin, args=(pep, protDict, minTransLen))
pool.close()
pool.join()
logging.info("Pool joined")
toWriteQueue.put(STOP)
writerProcess.join()
def transOrigin(pep,protDict, minTransLen):
"""
Called as the worker process to the pool in findTransOrigins(), this function takes an individual peptide and a
dictionary of protein sequences, and returns the proteins and locations within them from which the peptide could be
generated via trans splicing. Once the origin data is compiled, it is added to the toWriteQueue so that it
can be processed by the writer() function.
:param pep: the peptide which the user wishes to find potential trans splicing origins of.
:param protDict: a dictionary containing all the input protein sequences.
:param minTransLen: the minimum length that a cleavage must be for its location to be reported in the origin data
of a trans spliced peptide.
:return:
"""
try:
# initialise the dictionary
transOriginDict = {}
# initialise that key in the dictionary
transOriginDict[pep] = []
# create the altered pep:
alteredPep = pep.replace('I', 'L')
# find the splits which could be combined to create the peptide using trans or cis splicing.
# transSplits is a list of tups, where each tuple contains two complimentary splits.
# transSplits = [('A', 'BCD'),('AB', 'CD'),('ABC', 'D')]
transSplits = findSplits(alteredPep)
# format splits so they iterate in the order we want.
transSplits = editTransSplits(transSplits, minTransLen)
# pepFound bool allows us to skip all splits with both entries under minTransLen if
# it has already been found.
pepFound = False
# iterate through each combination in transSplits
for splitCombo in transSplits:
# declare the two splits in the combo
split1 = splitCombo[0]
split2 = splitCombo[1]
# if the longer split (index 1) is shorter than minTransLen and the pep has already been found,
# we can break: all remaining combos also have both splits under minTransLen
if len(split2) < minTransLen and (pepFound or transOriginDict[pep]!=[]):
break
# declare holder for split1 location. If the corresponding split is at least minTransLen, we want to
# initialise it as a list which will store possible locations. If it is smaller than minTransLen, we don't
# care about the location data, just whether it can be found or not. Thus we initialise it as False, and
# later will change it to True if it is found anywhere.
if len(split1) >= minTransLen:
splitLoc1 = []
else:
splitLoc1 = False
# declare holder for split2 location in the same format as split1
if len(split2) >= minTransLen:
splitLoc2 = []
else:
splitLoc2 = False
# iterate through each protein in protDict.keys()
for protName, protSeq in protDict.items():
# replace all Is with Ls as they are indistinguishable on mass spec.
alteredProt = protSeq.replace('I', 'L')
# check for the presence of split1 and split2 in the same protSeq. If both exist, it is a cis or lin
# peptide so should be ignored from the trans output.
if split1 in alteredProt and split2 in alteredProt:
overlapLoc1 = []
for x in re.finditer(split1, alteredProt):
overlapLoc1.append([x.start(), x.end()-1])
overlapLoc2 = []
for x in re.finditer(split2, alteredProt):
overlapLoc2.append([x.start(), x.end() - 1])
for loc1 in overlapLoc1:
for loc2 in overlapLoc2:
if not overlapCheck(loc1, loc2):
transOriginDict[pep] = []
transOrigin.toWriteQueue.put(transOriginDict)
return
# check for presence of split1 in the current protein
# if splitLoc1 == True, we know that this split has been found and we can
# continue through without checking the current protein for it.
if splitLoc1 == True:
pass
# if it is not True, we check for its presence in alteredProt. We build up an iterable
# which stores the locations of the split in the protein. If it can't be found it won't enter the
# for loop. If at least one can be found, splitLoc1 is either set to True, or the location
# information is appended to it.
else:
for x in re.finditer(split1, alteredProt):
if splitLoc1 == False:
splitLoc1 = True
break
else:
splitLoc1.append([protName, x.start(), x.end() - 1])
# check for the presence of split2, exactly the same as we have in splitLoc1
if splitLoc2 == True:
pass
else:
for x in re.finditer(split2, alteredProt):
if splitLoc2 == False:
splitLoc2 = True
break
else:
splitLoc2.append([protName, x.start(), x.end() - 1])
# When we get to here we have completed an iteration through every protein for the current combination of
# splits. We now need to decide what to do with the splitLoc variables based on what data they hold.
# if either splitLoc variable equals False (one of the splits was shorter than minTransLen and not found),
# we want to continue to the next iteration without doing anything.
if splitLoc1 == False or splitLoc2 == False:
continue
# if both splitLoc variables are True (both splits were found but both are smaller than minTransLen),
# we want to update pepFound and continue without adding to transOriginsDict.
if splitLoc1 == True and splitLoc2 == True:
pepFound = True
continue
# we reach here if at least one of the splits was longer than minTransLen and thus the corresponding
# splitLoc was initialised as a list.
# if splitLoc1 is not an empty list and splitLoc2 is True, this split combination can be used to create
# the pep via trans splicing. Thus, we update toAppend with the location data stored in splitLoc1.
if splitLoc1 != [] and splitLoc2 == True:
toAppend = splitLoc1
# same scenario as previous except splitLoc1 and 2 are switched.
elif splitLoc2 != [] and splitLoc1 == True:
toAppend = splitLoc2
# if we get to here both splitLocs must be lists. If they both aren't empty, both splits were found and
# thus both splitLoc data must be added to toAppend.
elif splitLoc1 != [] and splitLoc2 != []:
toAppend = splitLoc1 + splitLoc2
else:
continue
# add toAppend to transOriginDict. toAppend will be empty if no transSplicing was found involving
# splits of length greater than minTransLen.
transOriginDict[pep] += toAppend
# if after iterating through all splitCombos and all proteins for each combo no location data has been added,
# we must check whether pepFound was set to True, i.e. whether a splitCombo with both split lengths < minTransLen
# was found. If it was, simply add True to the dictionary.
if transOriginDict[pep] == [] and pepFound:
transOriginDict[pep] = [True]
# add this transOriginDict (related to the given pep) to the writer function.
transOrigin.toWriteQueue.put(transOriginDict)
except Exception as e:
exc_buffer = io.StringIO()
traceback.print_exc(file=exc_buffer)
errorString = 'Uncaught exception in worker process: ' + pep + '\n%s'
logging.error(
errorString,
exc_buffer.getvalue())
raise e
def editTransSplits(splits, minTransLen):
"""
Called by transOrigin(), this function takes the splits that could be combined to create the current peptide,
and sorts them in the required order for the rest of the algorithm to work. The shorter split goes first in
each pair (the longest sits at index 1), and the pairs which have both splits with length less than
minTransLen go last in the list.
:param splits: a list of tuples, where each tuple is a possible pair of subsequences which could be combined to
make the peptide.
:param minTransLen: the minimum length that a cleavage must be for its location to be reported in the origin data
of a trans spliced peptide.
:return splitsNew: splits sorted so that the longest split in each tuple appears last (index 1), and the tuples
with both splits less than minTransLen at the end of the list.
"""
#print(splits)
splits1 = []
splits2 = []
for tup in splits:
# sort the pair by length so that the shortest split comes first and the longest sits at index 1
tup = sorted(tup, key=len)
# we want the pairs which have both splits < minTransLen to be at the end. We only run
# them if none of the previous pairs have been found.
if len(tup[1]) < minTransLen:
splits2.append(tup)
else:
splits1.append(tup)
splitsNew = splits1 + splits2
return splitsNew
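# A quick illustration of the reordering (hypothetical splits, minTransLen=2),
# assuming the longest-first ordering described in the docstring: the pair
# ('A', 'B') has both splits under the minimum, so it is pushed to the back,
# while ('AB', 'CDE') is reordered longest-first:
#   editTransSplits([('AB', 'CDE'), ('A', 'B')], 2)
#   -> [['CDE', 'AB'], ['A', 'B']]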
def processTransInitArgs(toWriteQueue):
"""
Called from findTransOrigins() when the pool is initialised, this function simply gives transOrigin() (the worker
function for each process in the pool) access to the toWriteQueue.
"""
transOrigin.toWriteQueue = toWriteQueue
def writer(toWriteQueue, outputPath, spliceType, protDict, cisTup = None):
"""
The writer function for spliced origin computation. This function simply pulls origin data to be written
to file from the toWriteQueue, stores it in finalLinOriginDict and, once all processes are finished, writes
the final output csv. finalLinOriginDict structure: { inputPeptide: locationDataStructure }
Note that the locationDataStructure differs for trans, linear and cis splicing.
:param toWriteQueue: the multiprocessing.Queue which the origin data constructed by the origin worker functions
is pushed to at the end of each process.
:param outputPath: the path of the output csv file.
:param spliceType: the type of splicing being run in the current iteration.
:param protDict: the dictionary containing all the input proteins, which is required when writing to file.
:param cisTup: a tuple containing overlapFlag at index 0 and maxDistance at index 1. overlapFlag is True if the
user has selected no overlap when running cis splicing. maxDistance is the maximum distance two cleavages can be
away from each other for them to be combined in cis splicing. Will be 'None' if the user wants no maximum.
:return:
"""
finalLinOriginDict = {}
while True:
linOriginDict = toWriteQueue.get()
if linOriginDict == STOP:
logging.info("STOPPED writer queue")
break
for key, value in linOriginDict.items():
if key in finalLinOriginDict:
# extend the existing entry so location data from different processes is merged flat.
finalLinOriginDict[key] += value
else:
finalLinOriginDict[key] = value
if cisTup is None:
overlapFlag = None
maxDistance = 'None'
else:
overlapFlag = cisTup[0]
maxDistance = cisTup[1]
writeToFasta(finalLinOriginDict, outputPath, spliceType, protDict, overlapFlag, maxDistance)
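# A minimal sketch of the queue handshake writer() relies on (illustrative;
# STOP is the sentinel defined elsewhere in this module): worker processes
# put their per-peptide origin dicts on the queue, and the parent signals
# completion with STOP so the loop above breaks and the csv gets written:
#   toWriteQueue = multiprocessing.Queue()
#   ...  # pool workers call toWriteQueue.put(originDict)
#   toWriteQueue.put(STOP)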
def writeToFasta(originDict, outputPath, spliceType, protDict, overlapFlag, maxDistance):
"""
This function is called by writer(), and takes the output data dictionary and writes it to the given outputPath.
Also takes the spliceType argument to know how to format the csv and name it.
:param originDict: the dictionary containing the input peptides as keys and the location data as values. Structure:
originDict[peptide] = [(proteinName, locations),(proteinName, locations)...]
:param outputPath: the file location and file name to which the output is to be written.
:param spliceType: the type of splicing being computed. Each splice type has a different output syntax, and the
type of splicing also needs to be added to the output file name.
:param protDict: a dictionary containing the input protein data. This is needed to return slight differences in the
peptide and origin due to the program not treating I/J differently.
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return:
"""
with open(outputPath, 'a', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
# write a title with the splice type for the entire document, and a blank row underneath.
title = spliceType + ' Peptide Locations'
writer.writerow([title])
writer.writerow([])
# there is a single listing for every unique location/combination that a peptide can come from.
# they are listed under the format of the header
header = ['Prot Name', 'Peptide', 'Pep in Prot', 'Location']
writer.writerow(header)
# iterate through the originDict to write each entry to file.
for pep, origins in originDict.items():
# if origins == [], the peptide could not be found in the prot.fasta file using the given splice type.
if origins == []:
continue
# if origins contains data, an origin location was found and we must format the data accordingly.
else:
# call the data configuration function relevant to the splice type.
if spliceType == 'Linear':
dataRows = linDataRow(origins, pep, protDict)
elif spliceType == 'Cis':
dataRows = cisDataRowNew(origins, pep, protDict, overlapFlag, maxDistance)
else:
dataRows = transDataRow(origins, pep, protDict)
# write the formatted data to the row.
for protData in dataRows:
writer.writerow(protData)
# write a blank row and repeat for the next peptide.
writer.writerow([])
def linDataRow(origins, pep, protDict):
"""
Called by writeToFasta, this function takes the linear origin data for a given peptide and formats it for writing
to the csv file.
:param origins: the data structure containing information on where a given peptide was found within the input
proteins. Has the form: [('ProtName1', [[location1]]), ('ProtName2', [[location1], [location2]])]
:param pep: the peptide which the origin data relates to.
:param protDict: a dictionary containing the input protein data. This is needed to return slight differences in the
peptide and origin due to the program not treating I/J differently.
:return dataRows: a list of lists, where each sublist is a row of data which is to be written to file. Each sublist
has the format: [protName, peptide, pepInProt, location]:
"""
# iterate through each tuple (there is a tuple for every protein that was found to produce the peptide)
dataRows = []
for origin in origins:
# initialise the first column of the row to be the name of the protein.
firstHalf = [origin[0]]
# append the peptide as it exists in the input data.
firstHalf.append(pep)
# the second element of the tuple stores the location data. For each location found in the current protein,
# add the information to the next column of the row.
for location in origin[1]:
protPep = protDict[origin[0]][location[0]:location[1]+1]
secondHalf = [protPep, location]
dataRow = firstHalf + secondHalf
# append dataRow to dataRows
dataRows.append(dataRow)
# return dataRows to be written to csv.
return dataRows
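# Illustrative call (hypothetical data): for a protein 'P1' = 'MKAB' that
# contains the peptide 'KA' at location [1, 2], one row is produced:
#   linDataRow([('P1', [[1, 2]])], 'KA', {'P1': 'MKAB'})
#   -> [['P1', 'KA', 'KA', [1, 2]]]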
def cisDataRowNew(origins, pep, protDict, overlapFlag, maxDistance):
"""
Called by writeToFasta(), this function takes the cis origin data for a given peptide and formats it for writing
to the csv file.
:param origins: the data structure containing information on where a given peptide was found within the input
proteins. Has the form:
[('ProtName1', [[[b1 location1, b1 location2][y1 location1, y1 location2]],[[b3 location1],[y3 location1, y3 location2]]]), ('ProtName2', ....)]
:param pep: the peptide which the origin data relates to.
:param protDict: a dictionary containing the input protein data. This is needed to return slight differences in the
peptide and origin due to the program not treating I/J differently.
:param overlapFlag: True if the user has selected no overlap when running cis splicing.
:param maxDistance: the maximum distance two cleavages can be away from each other for them to be combined in cis
splicing. Will be 'None' if the user wants no maximum.
:return dataRows: a list of lists, where each sublist is a row of data which is to be written to file. Each sublist
has the format: [protName, peptide, pepInProt, location]
"""
dataRows = []
for origin in origins:
dataRow = []
protName = origin[0]
locationData = origin[1]
firstHalf = [protName]
firstHalf.append(pep)
for splitCombo in locationData:
split1List = splitCombo[0]
split2List = splitCombo[1]
for split1 in split1List:
for split2 in split2List:
# check for overlap between the splits if user has prescribed no overlap
if overlapFlag:
if overlapCheck(split1, split2):
#print("Failed overlap")
#print(split1)
#print(split2)
continue
# if maxDistance is not the string 'None', we need to check if the splits meet the criteria.
if maxDistance != 'None':
if checkMaxDistance(split1, split2, maxDistance):
#print("Failed Max Distance")
#print(split1)
#print(split2)
continue
prot = protDict[protName]
# check how the splits combine: a split of length 1 is passed as the amino acid
# itself rather than a [start, end] reference, so concatenate it directly.
if len(split1) == 1:
pepInProt = split1 + prot[split2[0]:split2[1]+1]
elif len(split2) == 1:
pepInProt = prot[split1[0]:split1[1] + 1] + split2
else:
pepInProt = prot[split1[0]:split1[1]+1] + prot[split2[0]:split2[1]+1]
location = str(split1) + ' and ' + str(split2)
secondHalf = [pepInProt]
secondHalf.append(location)
dataRow = firstHalf + secondHalf
dataRows.append(dataRow)
return dataRows
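# Illustrative call (hypothetical data): peptide 'MKCD' formed from splits
# [0, 1] and [4, 5] of protein 'P1' = 'MKABCD', with no overlap or distance
# constraints applied:
#   cisDataRowNew([('P1', [[[[0, 1]], [[4, 5]]]])], 'MKCD',
#                 {'P1': 'MKABCD'}, False, 'None')
#   -> [['P1', 'MKCD', 'MKCD', '[0, 1] and [4, 5]']]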
def overlapCheck(split1, split2):
"""
Called by cisDataRowNew() and editSingleAmino() to check if two split locations are overlapping. Returns True if
they do overlap.
:param split1: the first location, of the form [start, end].
:param split2: the second location, again of the form [start, end]
:return: True if the splits overlap, False if not.
"""
# if the function is receiving a single amino instead of the split reference list, return False as the absence
# of overlap is already assured.
if len(split1) == 1 or len(split2) == 1:
return False
split1Set = set(range(split1[0],split1[1]+1))
split2Set = set(range(split2[0], split2[1] + 1))
if len(split1Set.intersection(split2Set)) == 0:
return False
return True
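# Worked example (hypothetical indices): [2, 5] covers residues 2..5 and
# [5, 8] covers 5..8, so they share residue 5:
#   overlapCheck([2, 5], [5, 8])  -> True
#   overlapCheck([2, 5], [6, 8])  -> False (adjacent but disjoint)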
def checkMaxDistance(split1, split2, maxDistance):
"""
This function takes two split reference lists which refer to cleavages involved in cis splicing and returns whether
they are within the designated maxDistance.
:param split1: a split reference list for cis splicing: [start, end]
:param split2: a split reference list for cis splicing: [start, end]
:param maxDistance: the maximum distance two cleavages can be apart in the original protein for them to be
combined via cis splicing.
:return: True if the splits are not within the designated maxDistance.
"""
# if the function is receiving a single amino instead of the split reference list, return False as max distance
# has already been assured.
if len(split1) == 1 or len(split2) == 1:
return False
# if either check passes, the splits satisfy the max distance.
if abs(split1[1] - split2[0]) <= maxDistance:
return False
if abs(split2[1] - split1[0]) <= maxDistance:
return False
return True
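# Worked example (hypothetical indices, maxDistance=3): the gap between
# [0, 2] and [4, 6] is abs(2 - 4) = 2, within range, so the check passes:
#   checkMaxDistance([0, 2], [4, 6], 3)   -> False (within maxDistance)
#   checkMaxDistance([0, 2], [9, 11], 3)  -> True  (too far apart)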
def transDataRow(origins, pep, protDict):
"""
Called by writeToFasta(), this function takes the trans origin data for a given peptide and formats it for writing
to the csv file.
:param origins: the data structure containing information on where a given peptide was found within the input
proteins. Has the form: [[protName, startIndex, endIndex]..] where each sublist refers to the location of an
individual cleavage which can be combined with another cleavage somewhere in the protein file to create the peptide.
:param pep: the peptide which the origin data relates to.
:param protDict: a dictionary containing the input protein data. This is needed to return slight differences in the
peptide and origin due to the program not treating I/J differently.
:return dataRows: a list of lists, where each sublist is a row of data which is to be written to file. Each sublist
has the format: [protName, peptide, pepInProt, location]
"""
dataRows = []
for location in origins:
if location is True:
dataRow = [pep, "Formed only by cleavages under max length."]
dataRows.append(dataRow)
else:
protName = location[0]
startRef = location[1]
endRef = location[2] + 1
pepInProt = protDict[protName][startRef:endRef]
dataRow = [protName, pep, pepInProt, [startRef + 1, endRef]]
dataRows.append(dataRow)
return dataRows |
fn_api_runner.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
import collections
import contextlib
import copy
import logging
import queue
import threading
import time
from builtins import object
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam import metrics
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import registry
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.internal import pickler
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
IMPULSE_BUFFER_PREFIX = b'impulse:'
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
_DONE = object()
def __init__(self):
self._push_queue = queue.Queue()
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._started = False
self._uid_counter = 0
def Control(self, iterator, context):
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
self._started = True
while True:
to_push = self._push_queue.get()
if to_push is self._DONE:
return
yield to_push
def _read(self):
for data in self._inputs:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, item):
if item is self._DONE:
future = None
else:
if not item.instruction_id:
self._uid_counter += 1
item.instruction_id = 'control_%s' % self._uid_counter
future = ControlFuture(item.instruction_id)
self._futures_by_id[item.instruction_id] = future
self._push_queue.put(item)
return future
def done(self):
self.push(self._DONE)
# Can't join a thread before it's started.
while not self._started:
time.sleep(.01)
self._read_thread.join()
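# Control flow in brief (illustrative): the runner calls push() with an
# InstructionRequest and blocks on the returned ControlFuture; the SDK
# harness receives the request via the Control() stream, and _read() matches
# its response back to the waiting future by instruction_id.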
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def __iter__(self):
output_stream = create_OutputStream()
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [globally_window((key, values))]
else:
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for encoded_key, windowed_values in self._table.items():
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream, True)
return iter([output_stream.get()])
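# A minimal sketch of the grouping idea above, using plain dicts in place of
# Beam coders and streams (illustrative only; the real buffer operates on
# encoded bytes):
#   table = collections.defaultdict(list)
#   for key, value in [('k1', 1), ('k2', 2), ('k1', 3)]:
#       table[key].append(value)
#   # table == {'k1': [1, 3], 'k2': [2]}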
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, side_input_data, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn:
self._kv_extractor = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extractor = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % side_input_data.access_pattern)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
key, value = self._kv_extractor(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
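# A minimal sketch of the windowed partitioning above, with plain tuples in
# place of coders (illustrative): each value is recorded once per window it
# occupies, keyed by (key, window):
#   values_by_window = collections.defaultdict(list)
#   for key, value, windows in [('k', 1, ['w1', 'w2'])]:
#       for window in windows:
#           values_by_window[key, window].append(value)
#   # values_by_window == {('k', 'w1'): [1], ('k', 'w2'): [1]}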
class FnApiRunner(runner.PipelineRunner):
def __init__(self, use_grpc=False, sdk_harness_factory=None):
"""Creates a new Fn API Runner.
Args:
use_grpc: whether to use gRPC or simply make in-process calls;
defaults to False.
sdk_harness_factory: callable used to instantiate customized sdk harnesses;
typically not set by users.
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._use_grpc = use_grpc
if sdk_harness_factory and not use_grpc:
raise ValueError('GRPC must be used if a harness factory is provided.')
self._sdk_harness_factory = sdk_harness_factory
self._progress_frequency = None
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline):
MetricsEnvironment.set_metrics_supported(False)
RuntimeValueProvider.set_runtime_options({})
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
return self.run_via_runner_api(pipeline.to_runner_api())
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
def create_stages(self, pipeline_proto):
# First define a couple of helpers.
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
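# Illustrative behavior: empty or identical operands are returned as-is so
# fused stages can share frozenset instances; only genuine unions allocate:
#   union(frozenset(), frozenset({'a'}))       -> frozenset({'a'})
#   union(frozenset({'a'}), frozenset({'b'}))  -> frozenset({'a', 'b'})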
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(self, name, transforms,
downstream_side_inputs=None, must_follow=frozenset()):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
self.timer_pcollections = []
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
downstream_side_inputs = ', '.join(
str(si) for si in self.downstream_side_inputs)
return "%s\n %s\n must follow: %s\n downstream_side_inputs: %s" % (
self.name,
'\n'.join(["%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms]),
must_follow,
downstream_side_inputs)
def can_fuse(self, consumer):
def no_overlap(a, b):
return not a.intersection(b)
return (
self not in consumer.must_follow
and not self.is_flatten() and not consumer.is_flatten()
and no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other):
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow))
def is_flatten(self):
return any(transform.spec.urn == common_urns.primitives.FLATTEN.urn
for transform in self.transforms)
def side_inputs(self):
for transform in self.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
# fall through: pcoll is not a main input of any transform in this stage.
return False
def deduplicate_read(self):
seen_pcolls = set()
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(list(transform.outputs.items()))[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
# Some helper functions.
def add_or_get_coder_id(coder_proto):
for coder_id, coder in pipeline_components.coders.items():
if coder == coder_proto:
return coder_id
new_coder_id = unique_name(pipeline_components.coders, 'coder')
pipeline_components.coders[new_coder_id].CopyFrom(coder_proto)
return new_coder_id
def windowed_coder_id(coder_id, window_coder_id):
proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.WINDOWED_VALUE.urn)),
component_coder_ids=[coder_id, window_coder_id])
return add_or_get_coder_id(proto)
safe_coders = {}
def length_prefix_unknown_coders(pcoll, pipeline_components):
"""Length prefixes coder for the given PCollection.
Updates pipeline_components to have a length prefixed coder for
every component coder within the PCollection that is not understood
natively by the runner. Also populates the safe_coders map with
a corresponding runner side coder which is also length prefixed but
compatible for the runner to instantiate.
"""
good_coder_urns = set(
value.urn for value in common_urns.coders.__dict__.values())
coders = pipeline_components.coders
for coder_id, coder_proto in coders.items():
if coder_proto.spec.spec.urn == common_urns.coders.BYTES.urn:
bytes_coder_id = coder_id
break
else:
bytes_coder_id = unique_name(coders, 'bytes_coder')
pipeline_components.coders[bytes_coder_id].CopyFrom(
beam.coders.BytesCoder().to_runner_api(None))
coder_substitutions = {}
def wrap_unknown_coders(coder_id, with_bytes):
if (coder_id, with_bytes) not in coder_substitutions:
wrapped_coder_id = None
coder_proto = coders[coder_id]
if coder_proto.spec.spec.urn == common_urns.coders.LENGTH_PREFIX.urn:
coder_substitutions[coder_id, with_bytes] = (
bytes_coder_id if with_bytes else coder_id)
elif coder_proto.spec.spec.urn in good_coder_urns:
wrapped_components = [wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
if wrapped_components == list(coder_proto.component_coder_ids):
# Use as is.
coder_substitutions[coder_id, with_bytes] = coder_id
else:
wrapped_coder_id = unique_name(
coders,
coder_id + ("_bytes" if with_bytes else "_len_prefix"))
coders[wrapped_coder_id].CopyFrom(coder_proto)
coders[wrapped_coder_id].component_coder_ids[:] = [
wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
else:
# Not a known coder.
if with_bytes:
coder_substitutions[coder_id, with_bytes] = bytes_coder_id
else:
wrapped_coder_id = unique_name(coders, coder_id + "_len_prefix")
len_prefix_coder_proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.LENGTH_PREFIX.urn)),
component_coder_ids=[coder_id])
coders[wrapped_coder_id].CopyFrom(len_prefix_coder_proto)
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
# This operation is idempotent.
if wrapped_coder_id:
coder_substitutions[wrapped_coder_id, with_bytes] = wrapped_coder_id
return coder_substitutions[coder_id, with_bytes]
new_coder_id = wrap_unknown_coders(pcoll.coder_id, False)
safe_coders[new_coder_id] = wrap_unknown_coders(pcoll.coder_id, True)
pcoll.coder_id = new_coder_id
# Now define the "optimization" phases.
def impulse_to_input(stages):
bytes_coder_id = add_or_get_coder_id(
beam.coders.BytesCoder().to_runner_api(None))
global_window_coder_id = add_or_get_coder_id(
beam.coders.coders.GlobalWindowCoder().to_runner_api(None))
globally_windowed_bytes_coder_id = windowed_coder_id(
bytes_coder_id, global_window_coder_id)
for stage in stages:
# First map Reads, if any, to Impulse + triggered read op.
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.deprecated_primitives.READ.urn:
read_pc = only_element(transform.outputs.values())
read_pc_proto = pipeline_components.pcollections[read_pc]
impulse_pc = unique_name(
pipeline_components.pcollections, 'Impulse')
pipeline_components.pcollections[impulse_pc].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=impulse_pc,
coder_id=globally_windowed_bytes_coder_id,
windowing_strategy_id=read_pc_proto.windowing_strategy_id,
is_bounded=read_pc_proto.is_bounded))
stage.transforms.remove(transform)
# TODO(robertwb): If this goes multi-process before fn-api
# read is default, expand into split + reshuffle + read.
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Impulse',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.IMPULSE.urn),
outputs={'out': impulse_pc}))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name,
spec=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.IMPULSE_READ_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': impulse_pc},
outputs={'out': read_pc}))
# Now map impulses to inputs.
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.primitives.IMPULSE.urn:
stage.transforms.remove(transform)
impulse_pc = only_element(transform.outputs.values())
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=IMPULSE_BUFFER_PREFIX +
impulse_pc.encode('utf-8')),
outputs=transform.outputs))
yield stage
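# Net effect (illustrative): a deprecated Read is first rewritten as
#   Impulse -> IMPULSE_READ_TRANSFORM -> <original Read output>
# and every Impulse primitive then becomes a DATA_INPUT_URN transform whose
# payload names the impulse buffer, so the runner can feed it
# ENCODED_IMPULSE_VALUE directly.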
def lift_combiners(stages):
"""Expands CombinePerKey into pre- and post-grouping stages.
... -> CombinePerKey -> ...
becomes
... -> PreCombine -> GBK -> MergeAccumulators -> ExtractOutput -> ...
"""
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.composites.COMBINE_PER_KEY.urn:
combine_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
input_pcoll = pipeline_components.pcollections[only_element(
list(transform.inputs.values()))]
output_pcoll = pipeline_components.pcollections[only_element(
list(transform.outputs.values()))]
windowed_input_coder = pipeline_components.coders[
input_pcoll.coder_id]
element_coder_id, window_coder_id = (
windowed_input_coder.component_coder_ids)
element_coder = pipeline_components.coders[element_coder_id]
key_coder_id, _ = element_coder.component_coder_ids
accumulator_coder_id = combine_payload.accumulator_coder_id
key_accumulator_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn)),
component_coder_ids=[key_coder_id, accumulator_coder_id])
key_accumulator_coder_id = add_or_get_coder_id(key_accumulator_coder)
accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.ITERABLE.urn)),
component_coder_ids=[accumulator_coder_id])
accumulator_iter_coder_id = add_or_get_coder_id(
accumulator_iter_coder)
key_accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn)),
component_coder_ids=[key_coder_id, accumulator_iter_coder_id])
key_accumulator_iter_coder_id = add_or_get_coder_id(
key_accumulator_iter_coder)
precombined_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[precombined_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Precombine.out',
coder_id=windowed_coder_id(
key_accumulator_coder_id, window_coder_id),
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
grouped_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[grouped_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Group.out',
coder_id=windowed_coder_id(
key_accumulator_iter_coder_id, window_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
merged_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[merged_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Merge.out',
coder_id=windowed_coder_id(
key_accumulator_coder_id, window_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
def make_stage(base_stage, transform):
return Stage(
transform.unique_name,
[transform],
downstream_side_inputs=base_stage.downstream_side_inputs,
must_follow=base_stage.must_follow)
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Precombine',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components.COMBINE_PGBKCV.urn,
payload=transform.spec.payload),
inputs=transform.inputs,
outputs={'out': precombined_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Group',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.GROUP_BY_KEY.urn),
inputs={'in': precombined_pcoll_id},
outputs={'out': grouped_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Merge',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components
.COMBINE_MERGE_ACCUMULATORS.urn,
payload=transform.spec.payload),
inputs={'in': grouped_pcoll_id},
outputs={'out': merged_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/ExtractOutputs',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components
.COMBINE_EXTRACT_OUTPUTS.urn,
payload=transform.spec.payload),
inputs={'in': merged_pcoll_id},
outputs=transform.outputs))
else:
yield stage
def expand_gbk(stages):
"""Transforms each GBK into a write followed by a read.
"""
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for pcoll_id in transform.inputs.values():
length_prefix_unknown_coders(
pipeline_components.pcollections[pcoll_id], pipeline_components)
for pcoll_id in transform.outputs.values():
length_prefix_unknown_coders(
pipeline_components.pcollections[pcoll_id], pipeline_components)
# This is used later to correlate the read and write.
param = str("group:%s" % stage.name).encode('utf-8')
if stage.name not in pipeline_components.transforms:
pipeline_components.transforms[stage.name].CopyFrom(transform)
gbk_write = Stage(
transform.unique_name + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write',
inputs=transform.inputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
yield gbk_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset([gbk_write]), stage.must_follow))
else:
yield stage
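# Net effect (illustrative): a GroupByKey stage G is replaced by two stages
# correlated via the payload b'group:<stage name>': 'G/Write' drains the
# input into a grouping buffer, and 'G/Read' (which must follow the write)
# replays the grouped output downstream.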
def sink_flattens(stages):
"""Sink flattens and remove them from the graph.
A flatten that cannot be sunk/fused away becomes multiple writes (to the
same logical sink) followed by a read.
"""
# TODO(robertwb): Actually attempt to sink rather than always materialize.
# TODO(robertwb): Possibly fuse this into one of the stages.
pcollections = pipeline_components.pcollections
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.primitives.FLATTEN.urn:
# This is used later to correlate the read and writes.
param = str("materialize:%s" % transform.unique_name).encode('utf-8')
output_pcoll_id, = list(transform.outputs.values())
output_coder_id = pcollections[output_pcoll_id].coder_id
flatten_writes = []
for local_in, pcoll_in in transform.inputs.items():
if pcollections[pcoll_in].coder_id != output_coder_id:
# Flatten inputs must all be written with the same coder as is
# used to read them.
pcollections[pcoll_in].coder_id = output_coder_id
transcoded_pcollection = (
transform.unique_name + '/Transcode/' + local_in + '/out')
yield Stage(
transform.unique_name + '/Transcode/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=
transform.unique_name + '/Transcode/' + local_in,
inputs={local_in: pcoll_in},
outputs={'out': transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.IDENTITY_DOFN_URN))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
pcollections[transcoded_pcollection].CopyFrom(
pcollections[pcoll_in])
pcollections[transcoded_pcollection].coder_id = output_coder_id
else:
transcoded_pcollection = pcoll_in
flatten_write = Stage(
transform.unique_name + '/Write/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write/' + local_in,
inputs={local_in: transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
flatten_writes.append(flatten_write)
yield flatten_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset(flatten_writes), stage.must_follow))
else:
yield stage
def annotate_downstream_side_inputs(stages):
"""Annotate each stage with fusion-prohibiting information.
Each stage is annotated with the (transitive) set of pcollections that
depend on this stage that are also used later in the pipeline as a
side input.
While theoretically this could result in O(n^2) annotations, the size of
each set is bounded by the number of side inputs (typically much smaller
than the number of total nodes) and the number of *distinct* side-input
sets is also generally small (and shared due to the use of union
defined above).
This representation is also amenable to simple recomputation on fusion.
"""
consumers = collections.defaultdict(list)
all_side_inputs = set()
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
all_side_inputs = frozenset(all_side_inputs)
downstream_side_inputs_by_stage = {}
def compute_downstream_side_inputs(stage):
if stage not in downstream_side_inputs_by_stage:
downstream_side_inputs = frozenset()
for transform in stage.transforms:
for output in transform.outputs.values():
if output in all_side_inputs:
downstream_side_inputs = union(
downstream_side_inputs, frozenset([output]))
for consumer in consumers[output]:
downstream_side_inputs = union(
downstream_side_inputs,
compute_downstream_side_inputs(consumer))
downstream_side_inputs_by_stage[stage] = downstream_side_inputs
return downstream_side_inputs_by_stage[stage]
for stage in stages:
stage.downstream_side_inputs = compute_downstream_side_inputs(stage)
return stages
def fix_side_input_pcoll_coders(stages):
"""Length prefix side input PCollection coders.
"""
for stage in stages:
for si in stage.side_inputs():
length_prefix_unknown_coders(
pipeline_components.pcollections[si], pipeline_components)
return stages
def greedily_fuse(stages):
"""Places transforms sharing an edge in the same stage, whenever possible.
"""
producers_by_pcoll = {}
consumers_by_pcoll = collections.defaultdict(list)
# Used to always reference the correct stage as the producer and
# consumer maps are not updated when stages are fused away.
replacements = {}
def replacement(s):
old_ss = []
while s in replacements:
old_ss.append(s)
s = replacements[s]
for old_s in old_ss[:-1]:
replacements[old_s] = s
return s
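# Illustrative behavior (hypothetical stages): if A was fused into B and B
# later fused into C, replacement(A) follows the chain A -> B -> C and
# path-compresses the intermediate link, so replacements[A] points straight
# at C for subsequent lookups.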
def fuse(producer, consumer):
fused = producer.fuse(consumer)
replacements[producer] = fused
replacements[consumer] = fused
# First record the producers and consumers of each PCollection.
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers_by_pcoll[input].append(stage)
for output in transform.outputs.values():
producers_by_pcoll[output] = stage
logging.debug('consumers\n%s', consumers_by_pcoll)
logging.debug('producers\n%s', producers_by_pcoll)
# Now try to fuse away all pcollections.
for pcoll, producer in producers_by_pcoll.items():
pcoll_as_param = str("materialize:%s" % pcoll).encode('utf-8')
write_pcoll = None
for consumer in consumers_by_pcoll[pcoll]:
producer = replacement(producer)
consumer = replacement(consumer)
# Update consumer.must_follow set, as it's used in can_fuse.
consumer.must_follow = frozenset(
replacement(s) for s in consumer.must_follow)
if producer.can_fuse(consumer):
fuse(producer, consumer)
else:
# If we can't fuse, do a read + write.
if write_pcoll is None:
write_pcoll = Stage(
pcoll + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Write',
inputs={'in': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=pcoll_as_param))])
fuse(producer, write_pcoll)
if consumer.has_as_main_input(pcoll):
read_pcoll = Stage(
pcoll + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Read',
outputs={'out': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=pcoll_as_param))],
must_follow=frozenset([write_pcoll]))
fuse(read_pcoll, consumer)
else:
consumer.must_follow = union(
consumer.must_follow, frozenset([write_pcoll]))
# Everything that was originally a stage or a replacement, but wasn't
# replaced, should be in the final graph.
final_stages = frozenset(stages).union(list(replacements.values()))\
.difference(list(replacements))
for stage in final_stages:
# Update all references to their final values before throwing
# the replacement data away.
stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)
# Two reads of the same stage may have been fused. This is unneeded.
stage.deduplicate_read()
return final_stages
def inject_timer_pcollections(stages):
for stage in stages:
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag in payload.timer_specs.keys():
if len(transform.inputs) > 1:
raise NotImplementedError('Timers and side inputs.')
input_pcoll = pipeline_components.pcollections[
next(iter(transform.inputs.values()))]
# Create the appropriate coder for the timer PCollection.
void_coder_id = add_or_get_coder_id(
beam.coders.SingletonCoder(None).to_runner_api(None))
timer_coder_id = add_or_get_coder_id(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.TIMER.urn)),
component_coder_ids=[void_coder_id]))
key_coder_id = input_pcoll.coder_id
if (pipeline_components.coders[key_coder_id].spec.spec.urn
== common_urns.coders.WINDOWED_VALUE.urn):
key_coder_id = pipeline_components.coders[
key_coder_id].component_coder_ids[0]
if (pipeline_components.coders[key_coder_id].spec.spec.urn
== common_urns.coders.KV.urn):
key_coder_id = pipeline_components.coders[
key_coder_id].component_coder_ids[0]
key_timer_coder_id = add_or_get_coder_id(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn)),
component_coder_ids=[key_coder_id, timer_coder_id]))
timer_pcoll_coder_id = windowed_coder_id(
key_timer_coder_id,
pipeline_components.windowing_strategies[
input_pcoll.windowing_strategy_id].window_coder_id)
# Inject the read and write pcollections.
timer_read_pcoll = unique_name(
pipeline_components.pcollections,
'%s_timers_to_read_%s' % (transform.unique_name, tag))
timer_write_pcoll = unique_name(
pipeline_components.pcollections,
'%s_timers_to_write_%s' % (transform.unique_name, tag))
pipeline_components.pcollections[timer_read_pcoll].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=timer_read_pcoll,
coder_id=timer_pcoll_coder_id,
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
pipeline_components.pcollections[timer_write_pcoll].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=timer_write_pcoll,
coder_id=timer_pcoll_coder_id,
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=timer_read_pcoll + '/Read',
outputs={'out': timer_read_pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=('timers:%s' % timer_read_pcoll).encode(
'utf-8'))))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=timer_write_pcoll + '/Write',
inputs={'in': timer_write_pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=('timers:%s' % timer_write_pcoll).encode(
'utf-8'))))
assert tag not in transform.inputs
transform.inputs[tag] = timer_read_pcoll
assert tag not in transform.outputs
transform.outputs[tag] = timer_write_pcoll
stage.timer_pcollections.append(
(timer_read_pcoll + '/Read', timer_write_pcoll))
yield stage
def sort_stages(stages):
"""Order stages suitable for sequential execution.
"""
seen = set()
ordered = []
def process(stage):
if stage not in seen:
seen.add(stage)
for prev in stage.must_follow:
process(prev)
ordered.append(stage)
for stage in stages:
process(stage)
return ordered
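# Illustrative ordering (hypothetical stages): if B.must_follow == {A}, then
# process() recurses into A before appending B, so
#   sort_stages([B, A]) -> [A, B]
# regardless of the input order.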
# Now actually apply the operations.
pipeline_components = copy.deepcopy(pipeline_proto.components)
# Reify coders.
# TODO(BEAM-2717): Remove once Coders are already in proto.
coders = pipeline_context.PipelineContext(pipeline_components).coders
for pcoll in pipeline_components.pcollections.values():
if pcoll.coder_id not in coders:
window_coder = coders[
pipeline_components.windowing_strategies[
pcoll.windowing_strategy_id].window_coder_id]
coder = WindowedValueCoder(
registry.get_coder(pickler.loads(pcoll.coder_id)),
window_coder=window_coder)
pcoll.coder_id = coders.get_id(coder)
coders.populate_map(pipeline_components.coders)
known_composites = set(
[common_urns.primitives.GROUP_BY_KEY.urn,
common_urns.composites.COMBINE_PER_KEY.urn])
def leaf_transforms(root_ids):
for root_id in root_ids:
root = pipeline_proto.components.transforms[root_id]
if root.spec.urn in known_composites:
yield root_id
elif not root.subtransforms:
# Make sure its outputs are not a subset of its inputs.
if set(root.outputs.values()) - set(root.inputs.values()):
yield root_id
else:
for leaf in leaf_transforms(root.subtransforms):
yield leaf
# Initial set of stages are singleton leaf transforms.
stages = [
Stage(name, [pipeline_proto.components.transforms[name]])
for name in leaf_transforms(pipeline_proto.root_transform_ids)]
# Apply each phase in order.
for phase in [
annotate_downstream_side_inputs, fix_side_input_pcoll_coders,
lift_combiners, expand_gbk, sink_flattens, greedily_fuse,
impulse_to_input, inject_timer_pcollections, sort_stages]:
logging.info('%s %s %s', '=' * 20, phase, '=' * 20)
stages = list(phase(stages))
logging.debug('Stages: %s', [str(s) for s in stages])
# Return the (possibly mutated) context and ordered set of stages.
return pipeline_components, stages, safe_coders
def run_stages(self, pipeline_components, stages, safe_coders):
if self._use_grpc:
controller = FnApiRunner.GrpcController(self._sdk_harness_factory)
else:
controller = FnApiRunner.DirectController()
metrics_by_stage = {}
try:
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
metrics_by_stage[stage.name] = self.run_stage(
controller, pipeline_components, stage,
pcoll_buffers, safe_coders).process_bundle.metrics
finally:
controller.close()
return RunnerResult(runner.PipelineState.DONE, metrics_by_stage)
def run_stage(
self, controller, pipeline_components, stage, pcoll_buffers, safe_coders):
context = pipeline_context.PipelineContext(pipeline_components)
data_api_service_descriptor = controller.data_api_service_descriptor()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id.startswith(IMPULSE_BUFFER_PREFIX):
data_input[target] = [ENCODED_IMPULSE_VALUE]
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
'materialize:' + transform.inputs[tag],
beam.pvalue.SideInputData.from_runner_api(si, context))
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if controller.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
controller.state_api_service_descriptor().url)
# Store the required side inputs into state.
for (transform_id, tag), (pcoll_id, si) in data_side_input.items():
actual_pcoll_id = pcoll_id[len(b"materialize:"):]
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[actual_pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[pcoll_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
controller.state_handler.blocking_append(state_key, elements_data)
def get_buffer(pcoll_id):
if (pcoll_id.startswith(b'materialize:')
or pcoll_id.startswith(b'timers:')):
if pcoll_id not in pcoll_buffers:
# Just store the data chunks for replay.
pcoll_buffers[pcoll_id] = list()
elif pcoll_id.startswith(b'group:'):
# This is a grouping write, create a grouping buffer if needed.
if pcoll_id not in pcoll_buffers:
original_gbk_transform = pcoll_id.split(b':', 1)[1]
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[pcoll_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only identifier prefixes we produce for now,
# but special side input writes may go here.
raise NotImplementedError(pcoll_id)
return pcoll_buffers[pcoll_id]
result = BundleManager(
controller, get_buffer, process_bundle_descriptor,
self._progress_frequency).process_bundle(data_input, data_output)
while True:
timer_inputs = {}
for transform_id, timer_writes in stage.timer_pcollections:
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer(b'timers:' + timer_writes.encode('utf-8'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
timer_inputs[transform_id, 'out'] = [out.get()]
written_timers[:] = []
if timer_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in timer_inputs:
timer_inputs[other_input] = []
# TODO(robertwb): merge results
BundleManager(
controller,
get_buffer,
process_bundle_descriptor,
self._progress_frequency).process_bundle(timer_inputs, data_output)
else:
break
return result
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def blocking_get(self, state_key):
with self._lock:
return b''.join(self._state[self._to_key(state_key)])
def blocking_append(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(
StateServicer, beam_fn_api_pb2_grpc.BeamFnStateServicer):
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=self.blocking_get(request.state_key)))
elif request_type == 'append':
self.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
class DirectController(object):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self):
self.control_handler = self
self.data_plane_handler = data_plane.InMemoryDataChannel()
self.state_handler = FnApiRunner.StateServicer()
self.worker = sdk_worker.SdkWorker(
FnApiRunner.SingletonStateHandlerFactory(self.state_handler),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()), {})
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
logging.debug('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.debug('CONTROL RESPONSE %s', response)
return ControlFuture(request.instruction_id, response)
def done(self):
pass
def close(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
class GrpcController(object):
"""An grpc based controller for fn API control, state and data planes."""
def __init__(self, sdk_harness_factory=None):
self.sdk_harness_factory = sdk_harness_factory
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
self.state_handler = FnApiRunner.GrpcStateServicer()
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
self.state_handler, self.state_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
self.state_server.start()
self.data_server.start()
self.control_server.start()
self.worker = self.sdk_harness_factory(
'localhost:%s' % self.control_port
) if self.sdk_harness_factory else sdk_worker.SdkHarness(
'localhost:%s' % self.control_port, worker_count=1)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
logging.info('starting worker')
self.worker_thread.start()
def data_api_service_descriptor(self):
url = 'localhost:%s' % self.data_port
api_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
api_service_descriptor.url = url
return api_service_descriptor
def state_api_service_descriptor(self):
url = 'localhost:%s' % self.state_port
api_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
api_service_descriptor.url = url
return api_service_descriptor
def close(self):
self.control_handler.done()
self.worker_thread.join()
self.data_plane_handler.close()
self.control_server.stop(5).wait()
self.data_server.stop(5).wait()
self.state_server.stop(5).wait()
class BundleManager(object):
_uid_counter = 0
def __init__(
self, controller, get_buffer, bundle_descriptor, progress_frequency=None):
self._controller = controller
self._get_buffer = get_buffer
self._bundle_descriptor = bundle_descriptor
self._registered = False
self._progress_frequency = progress_frequency
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
# Register the bundle descriptor, if needed.
if not self._registered:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
self._controller.control_handler.push(process_bundle_registration)
self._registered = True
# Write all the input data to the channel.
for (transform_id, name), elements in inputs.items():
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in elements:
data_out.write(element_data)
data_out.close()
# Actually start the bundle.
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._controller.control_handler.push(process_bundle)
with ProgressRequester(
self._controller, process_bundle_id, self._progress_frequency):
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in expected_outputs.items()]
logging.debug('Gather all output data from %s.', expected_targets)
for output in self._controller.data_plane_handler.input_elements(
process_bundle_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in expected_outputs:
self._get_buffer(expected_outputs[target_tuple]).append(output.data)
logging.debug('Wait for the bundle to finish.')
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
return result
class ProgressRequester(threading.Thread):
def __init__(self, controller, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._controller = controller
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._controller.control_handler.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
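# Sketch of the intended handshake (names here are hypothetical): one
# thread parks in get() until another delivers the result via set().
#
#     future = ControlFuture('control_1')
#     threading.Thread(target=lambda: future.set(some_response)).start()
#     response = future.get(timeout=30)  # blocks until set() runs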
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_metrics):
self._counters = {}
self._distributions = {}
self._gauges = {}
for step_metric in step_metrics.values():
for ptransform_id, ptransform in step_metric.ptransforms.items():
for proto in ptransform.user:
key = metrics.execution.MetricKey(
ptransform_id,
metrics.metricbase.MetricName.from_runner_api(proto.metric_name))
if proto.HasField('counter_data'):
self._counters[key] = proto.counter_data.value
elif proto.HasField('distribution_data'):
self._distributions[
key] = metrics.cells.DistributionResult(
metrics.cells.DistributionData.from_runner_api(
proto.distribution_data))
elif proto.HasField('gauge_data'):
self._gauges[
key] = metrics.cells.GaugeResult(
metrics.cells.GaugeData.from_runner_api(
proto.gauge_data))
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [metrics.execution.MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {'counters': counters,
'distributions': distributions,
'gauges': gauges}
class RunnerResult(runner.PipelineResult):
def __init__(self, state, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._metrics_by_stage = metrics_by_stage
self._user_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
if self._user_metrics is None:
self._user_metrics = FnApiMetrics(self._metrics_by_stage)
return self._user_metrics
def only_element(iterable):
element, = iterable
return element
def unique_name(existing, prefix):
if prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
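# For example: unique_name(set(), 'stage') returns 'stage', while
# unique_name({'stage', 'stage_1'}, 'stage') returns 'stage_2'.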
|
__main__.py | import sys
import time
import config
import asyncio
from multiprocessing import Process
from db import es_util
from controller import server
try:
import uvloop as async_loop
except ImportError:
async_loop = asyncio
def __handle_args():
if len(sys.argv) < 2:
return
for i in range(1, len(sys.argv), 2):
key = sys.argv[i][2:]
setattr(config, key, (type(getattr(config, key)))(sys.argv[i+1]))
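# e.g. 'python __main__.py --workers 4' (a hypothetical flag) coerces
# '4' with type(config.workers) and sets config.workers = 4.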
processes = []
def __launch_process(func, args):
global processes
workers = config.workers
for _ in range(workers):
process = Process(target=func, args=args)
process.daemon = True #TODO custom id?
process.start()
processes.append(process)
for process in processes:
process.join()
#TODO stop all processes (Above calls should block)
#TODO https://github.com/channelcat/sanic/blob/master/sanic/sanic.py
#TODO signals and stopping
def __kill_processes():
global processes
for process in processes:
process.terminate()
if __name__ == '__main__':
__handle_args()
es_util.setup()
loop = async_loop.new_event_loop()
asyncio.set_event_loop(loop)
#TODO run nest in a different process with x process counts
print("Run run run")
server.start(loop)
|
test_server_async.py | #!/usr/bin/env python
from pymodbus.compat import IS_PYTHON3
import unittest
if IS_PYTHON3: # Python 3
from unittest.mock import patch, Mock, MagicMock
else: # Python 2
from mock import patch, Mock, MagicMock
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.server.asynchronous import ModbusTcpProtocol, ModbusUdpProtocol
from pymodbus.server.asynchronous import ModbusServerFactory
from pymodbus.server.asynchronous import (
StartTcpServer, StartUdpServer, StartSerialServer, StopServer,
_is_main_thread
)
from pymodbus.compat import byte2int
from pymodbus.transaction import ModbusSocketFramer
from pymodbus.exceptions import NoSuchSlaveException, ModbusIOException
import sys
#---------------------------------------------------------------------------#
# Fixture
#---------------------------------------------------------------------------#
import platform
from distutils.version import LooseVersion
IS_DARWIN = platform.system().lower() == "darwin"
OSX_SIERRA = LooseVersion("10.12")
if IS_DARWIN:
IS_HIGH_SIERRA_OR_ABOVE = LooseVersion(platform.mac_ver()[0]) >= OSX_SIERRA
SERIAL_PORT = '/dev/ptyp0' if not IS_HIGH_SIERRA_OR_ABOVE else '/dev/ttyp0'
else:
IS_HIGH_SIERRA_OR_ABOVE = False
SERIAL_PORT = "/dev/ptmx"
class AsynchronousServerTest(unittest.TestCase):
'''
This is the unittest for the pymodbus.server.asynchronous module
'''
#-----------------------------------------------------------------------#
# Setup/TearDown
#-----------------------------------------------------------------------#
def setUp(self):
'''
Initializes the test environment
'''
values = dict((i, '') for i in range(10))
identity = ModbusDeviceIdentification(info=values)
def tearDown(self):
''' Cleans up the test environment '''
pass
#-----------------------------------------------------------------------#
# Test ModbusTcpProtocol
#-----------------------------------------------------------------------#
def testTcpServerStartup(self):
''' Test that the modbus tcp asynchronous server starts correctly '''
with patch('twisted.internet.reactor') as mock_reactor:
if IS_PYTHON3:
console = False
call_count = 1
else:
console = True
call_count = 2
StartTcpServer(context=None, console=console)
self.assertEqual(mock_reactor.listenTCP.call_count, call_count)
self.assertEqual(mock_reactor.run.call_count, 1)
def testConnectionMade(self):
protocol = ModbusTcpProtocol()
protocol.transport = MagicMock()
protocol.factory = MagicMock()
protocol.factory.framer = ModbusSocketFramer
protocol.connectionMade()
self.assertIsInstance(protocol.framer, ModbusSocketFramer)
def testConnectionLost(self):
protocol = ModbusTcpProtocol()
protocol.connectionLost("What ever reason")
def testDataReceived(self):
protocol = ModbusTcpProtocol()
# mock_data = "Hellow world!"
mock_data = b"\x00\x01\x12\x34\x00\x04\xff\x02\x12\x34"
protocol.factory = MagicMock()
protocol.factory.control.ListenOnly = False
protocol.factory.store.slaves = MagicMock()
protocol.factory.store.single = True
protocol.factory.store.slaves.return_value = [byte2int(mock_data[6])]
protocol.framer = protocol._execute = MagicMock()
protocol.dataReceived(mock_data)
self.assertTrue(protocol.framer.processIncomingPacket.called)
# test datareceived returns None
protocol.factory.control.ListenOnly = False
self.assertEqual(protocol.dataReceived(mock_data), None)
def testTcpExecuteSuccess(self):
protocol = ModbusTcpProtocol()
protocol.store = MagicMock()
request = MagicMock()
protocol._send = MagicMock()
# test that _send gets called
protocol._execute(request)
self.assertTrue(protocol._send.called)
def testTcpExecuteFailure(self):
protocol = ModbusTcpProtocol()
protocol.factory = MagicMock()
protocol.factory.store = MagicMock()
protocol.store = MagicMock()
protocol.factory.ignore_missing_slaves = False
request = MagicMock()
protocol._send = MagicMock()
# CASE-1: test NoSuchSlaveException exceptions
request.execute.side_effect = NoSuchSlaveException()
protocol._execute(request)
self.assertTrue(request.doException.called)
# CASE-2: NoSuchSlaveException with ignore_missing_slaves = true
protocol.ignore_missing_slaves = True
request.execute.side_effect = NoSuchSlaveException()
self.assertEqual(protocol._execute(request), None)
# test other exceptions
request.execute.side_effect = ModbusIOException()
protocol._execute(request)
self.assertTrue(protocol._send.called)
def testSendTcp(self):
class MockMsg(object):
def __init__(self, msg, resp=False):
self.should_respond = resp
self.msg = msg
mock_msg = b"\x00\x01\x12\x34\x00\x04\xff\x02\x12\x34"
protocol = ModbusTcpProtocol()
mock_data = MockMsg(resp=True, msg=mock_msg)
protocol.control = MagicMock()
protocol.framer = MagicMock()
protocol.factory = MagicMock()
protocol.framer.buildPacket = MagicMock(return_value=mock_msg)
protocol.transport = MagicMock()
protocol._send(mock_data)
self.assertTrue(protocol.framer.buildPacket.called)
self.assertTrue(protocol.transport.write.called)
mock_data = MockMsg(resp=False, msg="helloworld")
self.assertEqual(protocol._send(mock_data), None)
#-----------------------------------------------------------------------#
# Test ModbusServerFactory
#-----------------------------------------------------------------------#
def testModbusServerFactory(self):
''' Test the base class for all the clients '''
factory = ModbusServerFactory(store=None)
self.assertEqual(factory.control.Identity.VendorName, '')
identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
factory = ModbusServerFactory(store=None, identity=identity)
self.assertEqual(factory.control.Identity.VendorName, 'VendorName')
#-----------------------------------------------------------------------#
# Test ModbusUdpProtocol
#-----------------------------------------------------------------------#
def testUdpServerInitialize(self):
protocol = ModbusUdpProtocol(store=None)
self.assertEqual(protocol.control.Identity.VendorName, '')
identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
protocol = ModbusUdpProtocol(store=None, identity=identity)
self.assertEqual(protocol.control.Identity.VendorName, 'VendorName')
def testUdpServerStartup(self):
''' Test that the modbus udp asynchronous server starts correctly '''
with patch('twisted.internet.reactor') as mock_reactor:
StartUdpServer(context=None)
self.assertEqual(mock_reactor.listenUDP.call_count, 1)
self.assertEqual(mock_reactor.run.call_count, 1)
@patch("twisted.internet.serialport.SerialPort")
def testSerialServerStartup(self, mock_sp):
''' Test that the modbus serial asynchronous server starts correctly '''
with patch('twisted.internet.reactor') as mock_reactor:
StartSerialServer(context=None, port=SERIAL_PORT)
self.assertEqual(mock_reactor.run.call_count, 1)
@patch("twisted.internet.serialport.SerialPort")
def testStopServerFromMainThread(self, mock_sp):
"""
Stop asynchronous server
:return:
"""
with patch('twisted.internet.reactor') as mock_reactor:
StartSerialServer(context=None, port=SERIAL_PORT)
self.assertEqual(mock_reactor.run.call_count, 1)
StopServer()
self.assertEqual(mock_reactor.stop.call_count, 1)
@patch("twisted.internet.serialport.SerialPort")
def testStopServerFromThread(self, mock_sp):
"""
Stop asynchronous server from child thread
:return:
"""
from threading import Thread
import time
with patch('twisted.internet.reactor') as mock_reactor:
StartSerialServer(context=None, port=SERIAL_PORT)
self.assertEqual(mock_reactor.run.call_count, 1)
t = Thread(target=StopServer)
t.start()
time.sleep(2)
self.assertEqual(mock_reactor.callFromThread.call_count, 1)
def testDatagramReceived(self):
mock_data = b"\x00\x01\x12\x34\x00\x04\xff\x02\x12\x34"
mock_addr = 0x01
protocol = ModbusUdpProtocol(store=None)
protocol.framer.processIncomingPacket = MagicMock()
protocol.control.ListenOnly = False
protocol._execute = MagicMock()
protocol.datagramReceived(mock_data, mock_addr)
self.assertTrue(protocol.framer.processIncomingPacket.called)
def testSendUdp(self):
protocol = ModbusUdpProtocol(store=None)
mock_data = b"\x00\x01\x12\x34\x00\x04\xff\x02\x12\x34"
mock_addr = 0x01
protocol.control = MagicMock()
protocol.framer = MagicMock()
protocol.framer.buildPacket = MagicMock(return_value=mock_data)
protocol.transport = MagicMock()
protocol._send(mock_data, mock_addr)
self.assertTrue(protocol.framer.buildPacket.called)
self.assertTrue(protocol.transport.write.called)
def testUdpExecuteSuccess(self):
protocol = ModbusUdpProtocol(store=None)
mock_addr = 0x01
protocol.store = MagicMock()
request = MagicMock()
protocol._send = MagicMock()
# test that _send gets called
protocol._execute(request, mock_addr)
self.assertTrue(protocol._send.called)
def testUdpExecuteFailure(self):
protocol = ModbusUdpProtocol(store=None)
mock_addr = 0x01
protocol.store = MagicMock()
request = MagicMock()
protocol._send = MagicMock()
# CASE-1: test NoSuchSlaveException exceptions
request.execute.side_effect = NoSuchSlaveException()
protocol._execute(request, mock_addr)
self.assertTrue(request.doException.called)
# CASE-2: NoSuchSlaveException with ignore_missing_slaves = true
protocol.ignore_missing_slaves = True
request.execute.side_effect = NoSuchSlaveException()
self.assertEqual(protocol._execute(request, mock_addr), None)
# test other exceptions
request.execute.side_effect = ModbusIOException()
protocol._execute(request, mock_addr)
self.assertTrue(protocol._send.called)
def testStopServer(self):
from twisted.internet import reactor
reactor.stop = MagicMock()
StopServer()
self.assertTrue(reactor.stop.called)
def testIsMainThread(self):
import threading
self.assertTrue(_is_main_thread())
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
unittest.main()
|
test_ftplib.py | """Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import io
from unittest import TestCase
from test import support
from test.support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
self.baseclass.push('226 transfer complete')
self.close()
def push(self, what):
super(DummyDTPHandler, self).push(what.encode('ascii'))
class DummyFTPHandler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
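# e.g. arg '127,0,0,1,4,210' -> ip '127.0.0.1', port 4*256 + 210 = 1234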
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
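# An EPRT argument looks like '|1|132.235.1.2|6275|' (RFC 2428): the
# first character is the delimiter, so the split yields (af, ip, port).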
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
self.dtp.push(RETR_DATA)
self.dtp.close_when_done()
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
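# The Event makes start() block until run() has marked the server
# active, so callers never race a server that is not yet serving.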
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send(b"1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
Video_Send.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pip install opencv-python
# pip install -U wxPython (problematic on Debian)
#
from __future__ import print_function
import time
import cv2
import numpy as np
import paho.mqtt.client as paho
import threading
import sys
import wx
import wx.lib.newevent
import Video_Send_wx as wxMainFrame
G_IS_CV3 = ( cv2.__version__[0] == '3' )
if( G_IS_CV3 ):
G_IMREAD_COLOR = cv2.IMREAD_COLOR
G_COLOR_BGR2RGB = cv2.COLOR_BGR2RGB
G_IMWRITE_JPEG_QUALITY = cv2.IMWRITE_JPEG_QUALITY
else:
G_IMREAD_COLOR = cv2.CV_LOAD_IMAGE_COLOR
G_COLOR_BGR2RGB = cv2.cv.CV_BGR2RGB
G_IMWRITE_JPEG_QUALITY = cv2.cv.CV_IMWRITE_JPEG_QUALITY
#### Change here
G_CAP_WIDTH = 320
G_CAP_HEIGHT = 240
####
class MainApp( wx.App ):
def OnInit( self ):
# these come from the GUI
self.CAP_DEVICE = 0
self.CAP_WIDTH = G_CAP_WIDTH
self.CAP_HEIGHT = G_CAP_HEIGHT
self.MQTT_SERVER = ''
self.MQTT_PORT = 0
self.MQTT_TOPIC = ''
# a few definitions
self.mqtt_client = paho.Client()
self.tx = False
self.mutex = threading.Lock()
self.image = None
# the frame capture-and-send task
self.tSendVideo = None
self.running = threading.Event()
# set up the GUI
self.mainFrame = wxMainFrame.MainFrame( parent=None )
self.mainFrame.Bind( wx.EVT_CLOSE, self.OnClose )
self.mainFrame.Bind( wx.EVT_MENU, self.OnSalir, id=wxMainFrame.ID_SALIR )
self.mainFrame.Device.Bind( wx.EVT_CHOICE, self.OnDevice )
self.mainFrame.Send.Bind( wx.EVT_TOGGLEBUTTON, self.OnSend )
# events so that GUI operations happen on the main thread
self.timer = wx.Timer( self )
self.Bind( wx.EVT_TIMER, self._UpdateImage, self.timer )
self.timer.Start( 33 )
# bring up the GUI
self.mainFrame.Show()
return True
def OnSalir( self, event ):
self.mainFrame.Close()
# propagate the close event
event.Skip()
def OnClose( self, event ):
# stop the task
self.running.clear()
if( self.tSendVideo is not None ):
self.tSendVideo.join()
self.tSendVideo = None
# we may have been transmitting
self.mqtt_client.loop_stop()
self.mqtt_client.disconnect()
# propagate the close event
event.Skip()
def OnDevice( self, event ):
# stop the task
self.running.clear()
if( self.tSendVideo is not None ):
self.tSendVideo.join()
self.tSendVideo = None
self.mainFrame.StatusBar.SetStatusText( '', 0 )
device = self.mainFrame.Device
choice = device.GetSelection()
if( choice != 0 ):
self.CAP_DEVICE = choice - 1
self._getGuiParams()
self.running.set()
self.tSendVideo = threading.Thread( target=self._TSendVideo, args=(), name='_TSendVideo' )
self.tSendVideo.start()
def OnSend( self, event ):
btnSend = self.mainFrame.Send
transmit = btnSend.GetValue()
if( transmit ):
self.mainFrame.StatusBar.SetStatusText( '', 0 )
self.mainFrame.Device.Disable()
self.mainFrame.Server.Disable()
self.mainFrame.Port.Disable()
self.mainFrame.Topic.Disable()
self.mainFrame.JpegQuality.Disable()
self.mainFrame.Cuadros.Disable()
self.mainFrame.Segundos.Disable()
btnSend.Disable()
self.MQTT_SERVER = self.mainFrame.Server.GetValue()
self.MQTT_PORT = self.mainFrame.Port.GetValue()
self.MQTT_TOPIC = self.mainFrame.Topic.GetValue()
try:
self.mqtt_client.connect( self.MQTT_SERVER, int( self.MQTT_PORT ) )
btnSend.SetLabel( 'Pause' )
btnSend.Enable()
self.mqtt_client.loop_start()
self.tx = True
return
except Exception as e:
self.mainFrame.StatusBar.SetStatusText( repr( e ), 0 )
btnSend.SetValue( False )
btnSend.Enable()
self.tx = False
self.mqtt_client.loop_stop()
self.mqtt_client.disconnect()
self.mainFrame.Device.Enable()
self.mainFrame.Server.Enable()
self.mainFrame.Port.Enable()
self.mainFrame.Topic.Enable()
self.mainFrame.JpegQuality.Enable()
self.mainFrame.Cuadros.Enable()
self.mainFrame.Segundos.Enable()
btnSend.SetLabel( 'Transmit' )
def _getGuiParams( self ):
cuadros = self.mainFrame.Cuadros.GetValue()
segundos = self.mainFrame.Segundos.GetValue()
self.mutex.acquire()
self._delay = float(segundos)/float(cuadros)
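# _delay is seconds per frame, e.g. 5 frames every 2 seconds -> 0.4 s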
self._jpeg_quality = self.mainFrame.JpegQuality.GetValue()
self.mutex.release()
def _TSendVideo( self ):
# open the capture device
cap = None
try:
cap = cv2.VideoCapture( self.CAP_DEVICE )
ret, img = cap.read()
if( not ret ):
raise ValueError( 'Invalid Device Index' )
except Exception as e:
wx.CallAfter( self.mainFrame.StatusBar.SetStatusText, repr( e ), 0 )
return
# image dimensions
if( G_IS_CV3 ):
cap.set( cv2.CAP_PROP_FRAME_HEIGHT, self.CAP_HEIGHT )
cap.set( cv2.CAP_PROP_FRAME_WIDTH, self.CAP_WIDTH )
else:
cap.set( cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.CAP_HEIGHT )
cap.set( cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.CAP_WIDTH )
wx.CallAfter( self.mainFrame.Send.Enable )
# start capturing
t1 = 0.
while( self.running.isSet() ):
try:
# 1. capture quickly: an internal buffer can otherwise cause lag at low FPS
# 2. low light slows down frame decoding
ret, img = cap.read()
if( ret ):
t2 = time.time()
self.mutex.acquire()
delay = self._delay
self.mutex.release()
if( ( t2 - t1 ) >= delay ):
_, data = cv2.imencode( '.jpg', img, (G_IMWRITE_JPEG_QUALITY, self._jpeg_quality ) )
self.mutex.acquire()
self.image = data
self.mutex.release()
if( self.tx ):
self.mqtt_client.publish( self.MQTT_TOPIC, data.tostring() )
t1 = t2
except Exception as e:
print( repr( e ) )
wx.CallAfter( self._getGuiParams )
time.sleep( 0.010 )
# release the capture device
cap.release()
wx.CallAfter( self.mainFrame.Send.Disable )
print( 'Capture: Finished' )
sys.stdout.flush()
def _UpdateImage( self, evt ):
self.mutex.acquire()
img = self.image
self.image = None
self.mutex.release()
if( img is None ):
return
try:
# decode the frame
data = np.fromstring( img, np.uint8 )
img = cv2.imdecode( data, G_IMREAD_COLOR )
# where to display it
panel = self.mainFrame.CaptureImage
# show it scaled to the window
img = cv2.cvtColor( img, G_COLOR_BGR2RGB )
w, h = panel.Size
if( w>0 and h>0 ):
img = cv2.resize( img, (w, h) )
bmp = wx.Bitmap.FromBuffer( w, h, img )
dc = wx.ClientDC( panel )
dc.DrawBitmap( bmp, 0, 0 )
del dc
except Exception as e:
pass
# Show time
myApp = MainApp( False )
myApp.MainLoop()
|
cliente.py | import threading
import sys
import socket
import pickle
import os
class Cliente():
def __init__(self, host=socket.gethostname(), port=59989):
self.sock = socket.socket()
self.sock.connect((str(host), int(port)))
hilo_recv_mensaje = threading.Thread(target=self.recibir)
hilo_recv_mensaje.daemon = True
hilo_recv_mensaje.start()
print('Thread with PID', os.getpid())
print('Active threads', threading.active_count())
while True:
msg = input('\nType a message ? ** Send = ENTER ** Leave chat = Q \n')
if msg != 'Q' :
self.enviar(msg)
else:
print(" **** TALOGOOO ****")
self.sock.close()
sys.exit()
def recibir(self):
while True:
try:
data = self.sock.recv(32)
if data:
print(pickle.loads(data))
except:
pass
def enviar(self, msg):
self.sock.send(pickle.dumps(msg))
c = Cliente() |
oauth_server.py | """Simple bottle server to receive authorization callback."""
import logging
import os
import signal
from threading import Thread
from typing import Any, NoReturn
from wsgiref.simple_server import WSGIRequestHandler, make_server
import bottle
_LOGGER = logging.getLogger(__name__)
CON = None
SERVER = None
# Enable non-HTTPS redirect URI for development/testing.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
def start(connection) -> None:
"""Start the local websever for auth callbacks."""
# Allow Ctrl-C break
signal.signal(signal.SIGINT, signal.SIG_DFL)
global SERVER, CON
CON = connection
app = bottle.app()
try:
SERVER = MyWSGIRefServer(host="localhost", port=5000)
app.run(server=SERVER)
except Exception as exc: # pylint: disable=broad-except
_LOGGER.error(exc)
@bottle.route("/")
def login() -> NoReturn:
"""Prompt user to authenticate."""
redirect_uri = f"http://{SERVER.host}:{SERVER.port}/login/authorized"
(auth_url, _) = CON.get_authorization_url(redirect_uri=redirect_uri)
_LOGGER.debug("URL: %s, auth_url %s", redirect_uri, auth_url)
return bottle.redirect(auth_url)
@bottle.route("/login/authorized")
def callback() -> str:
"""Handle the application's Redirect Uri.
Request the token, fetch some data to test it, and shut down the server.
"""
CON.request_token(bottle.request.url)
res = CON.get("https://graph.microsoft.com/v1.0/me").json()
return SERVER.shutdown() + str(res)
class MyWSGIRefServer(bottle.WSGIRefServer):
"""WSGI server with shutdown."""
server = None
def run(self, app: Any) -> None:
"""Run the server."""
if self.quiet:
class QuietHandler(WSGIRequestHandler):
"""Quiet handler."""
def log_request(self, *_, **__): # pylint: disable=signature-differs
pass
self.options["handler_class"] = QuietHandler
self.server = make_server(self.host, self.port, app, **self.options)
self.server.serve_forever(poll_interval=0.5)
def shutdown(self) -> str:
"""Shutdown the server in another thread."""
Thread(target=self.server.shutdown).start()
return "BYE"
|
add_geoinfo.py | """
需要离线添加一些ip的信息
by swm 2020/04/24
"""
import queue
import threading
import json
import time
import uuid
from pathlib import Path
import argparse
import maxminddb
class AddInfo(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument("pthreads", help="55", type=int)
self.args = self.parser.parse_args()
# directory to read input from
self.input_file_path = Path('./hkdata')
if not self.input_file_path.exists():
raise Exception("Error input path")
# self.input_file_path.mkdir(exist_ok=True, parents=True)
self.tmp_path = Path('./_tmppath')
self.tmp_path.mkdir(exist_ok=True, parents=True)
self.output_path = Path('./_outputpath')
self.output_path.mkdir(exist_ok=True, parents=True)
# thread lock
self.process_locker = threading.Lock()
# queue of files that have been read in
self.task_queue = queue.Queue()
# queue of records to write out
self.output_queue = queue.Queue()
# processing limit
self.limit = 5
# file extension
self.file_suffix = 'iscan_search'
# thread count; change it according to actual needs
self.max_threads = int(self.args.pthreads)
def read_file(self, file_path: Path):
"""
Read the contents of a file
:param file_path:
:return:
"""
res = file_path.read_text(encoding='utf-8')
return json.loads(res)
def start_scan_file(self):
"""
Thread that scans for data files
:return:
"""
while True:
try:
for file in self.input_file_path.iterdir():
# each entry here is a file Path
if file.suffix == '.' + self.file_suffix:
name = file.name
print(f"Get file :{name}")
# move it into the tmp directory
tmpname = self.tmp_path / name
file.replace(tmpname)
try:
res_dict = self.read_file(tmpname)
except Exception as err:
print(f'some error happened, err:{err}')
continue
self.task_queue.put([res_dict, tmpname])
else:
file.unlink()
continue
print("Complete process data")
except Exception as error:
print(f"Read file error, err:{error}")
finally:
# rescan once per second
time.sleep(1)
def get_ip_geoinfo(self, ip: str) -> dict:
"""
Look up the location of an IP address
:param ip:
:return:
"""
res = {}
try:
# 1. first check that the database is present,
# so the current db name is guaranteed to exist
# with DbipMmdb.__scouter_ip_locker:
# only one thread at a time may check the version here; slow, but unavoidable with multiple threads
# self.__check_db_version()
reader = maxminddb.open_database('./_ip_location_dbs/dbfile/2020-05.mmdb')
res = reader.get(ip) or {}
reader.close()
except Exception as error:
print("Get geoinfo error:\nip={}\nerror:{}".format(ip, error))
finally:
return res
def process_dns(self, file_info: list):
"""
Process one DNS/geo record
:param file_info:
:return:
"""
task = file_info[0]
filename: Path = file_info[1]
ip = task.get('ip')
print(f'Start get ip info: {ip}')
try:
with self.process_locker:
res_dict = self.get_ip_geoinfo(ip)
tgeo = task.get('geoinfo')
# geoinfo is missing or empty
if tgeo is None or len(tgeo) == 0:
geodict = {}
# recent data did not come with geoinfo; add it by hand
data_city_en = res_dict.get('city', {}).get('names', {}).get('en', None)
data_city_cn = res_dict.get('city', {}).get('names', {}).get('zh-CN', None)
geodict['city'] = {'names': {'en': data_city_en, 'zh-CN': data_city_cn}}
data_state_en = None
data_state_cn = None
subdivisions = res_dict.get('subdivisions', [])
if len(subdivisions) > 0:
data_state_en = subdivisions[0].get('names', {}).get('en', None)
data_state_cn = subdivisions[0].get('names', {}).get('zh-CN', None)
geodict['province'] = {'names': {'en': data_state_en, 'zh-CN': data_state_cn}}
data_country_code = res_dict.get('country', {}).get('iso_code', None)
data_country_en = res_dict.get('country', {}).get('names', {}).get('en')
data_country_cn = res_dict.get('country', {}).get('names', {}).get('zh-CN')
geodict['country'] = {'code': data_country_code, 'names': {'en': data_country_en, 'zh-CN': data_country_cn}}
location = res_dict.get('location', {})
lng = location.get('longitude', None)
lat = location.get('latitude', None)
geodict['location'] = {'lat': lat, 'lon': lng}
ctinfo = res_dict.get('continent', {})
ctcode = ctinfo.get('code')
ctname_en = ctinfo.get('names', {}).get('en')
ctname_cn = ctinfo.get('names', {}).get('zh-CN')
geodict['continent'] = {'code': ctcode, 'names': {'en': ctname_en, 'zh-CN': ctname_cn}}
globalcode = 852
timezone = 0
geodict['globaltelcode'] = globalcode
geodict['timezone'] = timezone
task['geoinfo'] = geodict
traits_info = res_dict.get('traits', {})
org = traits_info.get('organization', None)
isp = traits_info.get('isp', None)
if org is not None:
task['org'] = org
if isp is not None:
task['isp'] = isp
except Exception as err:
print(err)
finally:
# delete the file once processing is done
self.output_queue.put(task)
filename.unlink()
def start_process_file(self):
"""
Process the files piling up in the queue.
Multiple threads may run this.
:return:
"""
while True:
if self.task_queue.empty():
time.sleep(0.5)
continue
try:
task = self.task_queue.get()
self.process_dns(task)
except Exception as error:
print(f"Process file error, err:{error}")
finally:
self.task_queue.task_done()
def outputdata(self):
while True:
if self.output_queue.empty():
time.sleep(0.5)
continue
try:
task = self.output_queue.get()
tmpname = None
lines = json.dumps(task, ensure_ascii=False)
try:
# --------------------------------------------tmppath
tmpname = self.tmp_path / f'{uuid.uuid1()}.{self.file_suffix}'
while tmpname.exists():
tmpname = self.tmp_path / f'{uuid.uuid1()}.{self.file_suffix}'
tmpname.write_text(lines.strip(), encoding='utf-8')
# ------------------------------------------outputpath
outname = self.output_path / f'{uuid.uuid1()}.{self.file_suffix}'
while outname.exists():
outname = self.output_path / f'{uuid.uuid1()}.{self.file_suffix}'
# move from the tmp path to the output path
tmpname.replace(outname)
tmpname = None # marks the move as successful; the move itself won't fail
print(f'Output file {outname}')
except Exception as error:
raise Exception(error)
finally:
if tmpname is not None:
tmpname.unlink()
except Exception as error:
print(f"outputdata error, err:{error}")
continue
finally:
self.output_queue.task_done()
if __name__ == '__main__':
dd = AddInfo()
print(f'Start {dd.max_threads} process threads')
time.sleep(1)
try:
thread1 = threading.Thread(target=dd.start_scan_file, name="scantask")
thread1.start()
for i in range(dd.max_threads):
thread2 = threading.Thread(target=dd.start_process_file, name="processtask")
thread2.start()
thread3 = threading.Thread(target=dd.outputdata, name='outputdata')
thread3.start()
print('Start............')
except KeyboardInterrupt:
print('Forced to stop from keyboard')
|
job_control.py | import collections
import logging
import threading
class Job:
def execute(self): pass
def request_stop(self): pass
class Agent:
"""
The name serves as the unique identifier. When the job finishes, the
callback is invoked with self (this Agent) as the only parameter.
"""
def __init__(self, job, callback, name=None):
self._job = job
self._callback = callback
self._thread = None
self._name = name or 'job {}'.format(id(self))
@property
def name(self):
return self._name
@property
def job(self):
return self._job
def is_running(self):
return self._thread is not None and self._thread.is_alive()
def execute(self):
self._thread = threading.Thread(target=self._execute_and_call)
self._thread.start()
return self
def request_stop(self):
self._job.request_stop()
def _execute_and_call(self):
try:
self._job.execute()
finally:
self._callback(self)
def failed_job() -> Agent:
class FailedJob:
def execute(self):
return False
def request_stop(self):
return True
callback = lambda _: None
return Agent(FailedJob(), callback, "failed job")
class JobControl:
"""
Jobs are pulled out from the left (front of the queue). add_job() appends
one to the end (right side), while insert_job() inserts it in front (left
side).
"""
def __init__(self):
self._background = {}
self._active_agent = None
self._queue = collections.deque()
self._lock = threading.RLock()
def clear_queue(self) -> None:
self._queue.clear()
def add_job(self, job, name=None):
return self._enqueue_job(job, self._queue.append, name)
def insert_job(self, job, name=None):
return self._enqueue_job(job, self._queue.appendleft, name)
def spawn_job(self, job, name):
agent = None
if self._acquire_lock():
try:
agent = Agent(job, self._on_background_done, name)
self._background[agent.name] = agent
agent.execute()
finally:
self._release_lock()
return agent
def get_queued(self):
return list(self._queue) if self._queue is not None else None
def get_background(self):
if self._background is None:
return None
return self._background.values()
def get_current(self):
return self._active_agent
def is_running(self, name) -> bool:
if self._active_agent is not None and self._active_agent.name == name:
return True
return name in self._background
def stop_background(self) -> bool:
result = False
if self._acquire_lock():
result = True
try:
# Get a copy to avoid iterating over a list that's undergoing
# deletions.
agents = self.get_background()
if agents is not None:
for agent in list(agents):
agent.request_stop()
finally:
self._release_lock()
return result
def stop_job(self, name) -> bool:
result = False
if self._acquire_lock():
try:
if (self._active_agent is not None and
self._active_agent.name == name):
self._active_agent.request_stop()
result = True
elif name in self._background:
self._background[name].request_stop()
result = True
finally:
self._release_lock()
return result
def stop_current(self) -> bool:
if self._active_agent is not None and self._active_agent.is_running():
if self._acquire_lock():
try:
self._active_agent.request_stop()
finally:
self._release_lock()
return True
return False
def has_jobs(self) -> bool:
return (len(self._queue) > 0 or len(self._background) > 0 or
self._active_agent is not None)
def _run_next_job(self) -> None:
if self._acquire_lock():
try:
if self._active_agent is None and len(self._queue) > 0:
self._active_agent = self._queue.popleft()
self._active_agent.execute()
finally:
self._release_lock()
def _enqueue_job(self, job, append_fn, name):
agent = None
if self._acquire_lock():
try:
agent = Agent(job, self._on_execution_done, name)
append_fn(agent)
if self._active_agent is None:
self._run_next_job()
finally:
self._release_lock()
return agent
def _on_execution_done(self, _):
if self._acquire_lock():
try:
self._active_agent = None
finally:
self._release_lock()
self._run_next_job()
def _on_background_done(self, agent):
if self._acquire_lock():
try:
del self._background[agent.name]
finally:
self._release_lock()
def _acquire_lock(self):
if not self._lock.acquire(True, 1.0):
logging.error("Unable to acquire lock.")
return False
return True
def _release_lock(self):
self._lock.release()
|
tests.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import time
import unittest
from datetime import datetime, timedelta
try:
import threading
except ImportError:
import dummy_threading as threading
from django.core.cache import cache
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import File, ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (InMemoryUploadedFile, SimpleUploadedFile,
TemporaryUploadedFile)
from django.test import LiveServerTestCase, SimpleTestCase
from django.test import override_settings
from django.utils import six
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import doesn't exist.
"""
with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
get_storage_class raises an error if the requested class doesn't exist.
"""
self.assertRaises(ImportError, get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImportError,
"No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
Makes sure an empty location falls back to the current working directory.
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test',
'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
# like the encodeURIComponent() JavaScript function does
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir,
base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files),
{'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
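# A minimal sketch (not a Django API) of the EEXIST-tolerant directory
# creation that test_makedirs_race_handling above exercises: swallow the
# error when another writer created the directory first, and re-raise
# everything else.
def _makedirs_tolerating_race(path):
    import errno
    import os
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise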
class CustomStorage(FileSystemStorage):
def get_available_name(self, name):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
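# Hedged sketch of how get_available_name above renames duplicates when an
# extension is present; the file names are made up for illustration. A
# colliding 'photo.txt' becomes 'photo.2.txt', then 'photo.3.txt', and so
# on, because the counter is inserted before the preserved extension parts.
def _example_custom_names(storage):
    first = storage.save('photo.txt', ContentFile('a'))   # 'photo.txt'
    second = storage.save('photo.txt', ContentFile('b'))  # 'photo.2.txt'
    return first, second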
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class FileFieldStorageTests(unittest.TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
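# Worked example of the default-mode arithmetic asserted above, assuming the
# same umask of 0o027: files default to 0o666 & ~0o027 == 0o640 (rw-r-----)
# and directories to 0o777 & ~0o027 == 0o750 (rwxr-x---).
def _example_default_modes(umask=0o027):
    return 0o666 & ~umask, 0o777 & ~umask  # (0o640, 0o750)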
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
        Test that ContentFile can be saved correctly with the filesystem storage,
        whether it was initialized with bytes or unicode content"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
        Test the File storage API with a file-like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
test_base.py | #!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pexpect.replwrap will not work with unicode_literals
#from __future__ import unicode_literals
import copy
import os
import psutil
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import threading
import unittest
import pexpect
try:
from pexpect.replwrap import REPLWrapper
except ImportError as e:
print("Could not import pexpect.replwrap: %s" % (str(e)))
print(" Need pexpect version 3.3, installed version: %s" % (
str(pexpect.__version__)))
print(" pexpect location: %s" % (str(pexpect.__file__)))
exit(1)
try:
import argparse
except ImportError:
print ("Cannot import argparse: pip install argparse?")
exit(1)
try:
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
except ImportError:
print ("Cannot import thrift: pip install thrift?")
exit(1)
'''Defaults that should be used in integration tests.'''
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_DIR = "/tmp/osquery-tests/"
CONFIG_NAME = CONFIG_DIR + "tests"
DEFAULT_CONFIG = {
"options": {
"database_path": "%s.db" % CONFIG_NAME,
"pidfile": "%s.pid" % CONFIG_NAME,
"config_path": "%s.conf" % CONFIG_NAME,
"extensions_socket": "%s.em" % CONFIG_NAME,
"extensions_interval": "1",
"extensions_timeout": "0",
"watchdog_level": "3",
"disable_logging": "true",
"force": "true",
},
"schedule": {},
}
# osquery-specific python tooling and utilities
import utils
'''Expect CONFIG to be set during Tester.main() to a python dict.'''
CONFIG = None
'''Expect ARGS to contain the argparsed namespace.'''
ARGS = None
class OsqueryUnknownException(Exception):
'''Exception thrown for unknown output from the shell'''
pass
class OsqueryException(Exception):
'''Exception thrown when the shell returns an error'''
pass
class OsqueryWrapper(REPLWrapper):
'''A pexpect wrapper intended for interacting with the osqueryi REPL'''
PROMPT = u'osquery> '
CONTINUATION_PROMPT = u' ...> '
ERROR_PREFIX = 'Error:'
def __init__(self, command='../osqueryi', args={}, env={}):
global CONFIG_NAME, CONFIG
options = copy.deepcopy(CONFIG)["options"]
for option in args.keys():
options[option] = args[option]
options["database_path"] += str(random.randint(1000, 9999))
command = command + " " + " ".join(["--%s=%s" % (k, v) for
k, v in options.iteritems()])
proc = pexpect.spawn(command, env=env)
super(OsqueryWrapper, self).__init__(
proc,
self.PROMPT,
None,
continuation_prompt=self.CONTINUATION_PROMPT)
def run_query(self, query):
'''Run a query, returning the results as a list of dictionaries
When unknown output is encountered, OsqueryUnknownException is thrown.
When osqueryi returns an error, OsqueryException is thrown.
'''
query = query + ';' # Extra semicolon causes no harm
result = self.run_command(query)
# On Mac, the query appears first in the string. Remove it if so.
result = re.sub(re.escape(query), '', result).strip()
result_lines = result.splitlines()
if len(result_lines) < 1:
raise OsqueryUnknownException(
'Unexpected output:\n %s' % result_lines)
if result_lines[0].startswith(self.ERROR_PREFIX):
raise OsqueryException(result_lines[0])
try:
header = result_lines[1]
columns = re.findall('[^ |]+', header)
rows = []
for line in result_lines[3:-1]:
values = re.findall('[^ |]+', line)
rows.append(
dict((col, val) for col, val in zip(columns, values)))
return rows
except:
raise OsqueryUnknownException(
'Unexpected output:\n %s' % result_lines)
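# A hedged usage sketch for OsqueryWrapper; it assumes CONFIG has been
# populated by Tester() and that the local build exposes the standard
# 'time' table. It is not invoked by the test suite.
def _example_osqueryi_query():
    osqueryi = OsqueryWrapper()
    rows = osqueryi.run_query('SELECT unix_time FROM time')
    return rows  # e.g. [{'unix_time': '1465419795'}]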
class ProcRunner(object):
'''A helper class to open a subprocess and perform testing actions.
The subprocess is opened in a new thread and state is tracked using
this class wrapper.
'''
def __init__(self, name, path, _args=[], interval=0.02, silent=False):
self.started = False
self.proc = None
self.name = name
self.path = path
self.args = _args
self.interval = interval
self.silent = silent
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
pid = 0
code = -1
try:
if self.silent:
self.proc = subprocess.Popen([self.path] + self.args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
self.proc = subprocess.Popen([self.path] + self.args)
pid = self.proc.pid
self.started = True
except Exception as e:
print (utils.red("Process start failed:") + " %s" % self.name)
print (str(e))
sys.exit(1)
try:
while self.proc.poll() is None:
self.started = True
time.sleep(self.interval)
self.started = True
code = -1 if self.proc is None else self.proc.poll()
self.proc = None
except Exception as e:
return
def requireStarted(self, timeout=2):
delay = 0
while delay < timeout:
if self.started is True:
break
time.sleep(self.interval * 10)
delay += self.interval * 10
def getChildren(self, timeout=1):
'''Get the child pids.'''
self.requireStarted()
if not self.proc:
return []
try:
proc = psutil.Process(pid=self.proc.pid)
delay = 0
while len(proc.children()) == 0:
if delay > timeout:
return []
time.sleep(self.interval)
delay += self.interval
return [p.pid for p in proc.children()]
except:
pass
return []
@property
def pid(self):
self.requireStarted()
return self.proc.pid if self.proc is not None else None
def kill(self, children=False):
self.requireStarted()
if children:
for child in self.getChildren():
try:
os.kill(child, 9)
except:
pass
if self.proc:
try:
os.kill(self.pid, 9)
except:
pass
self.proc = None
    def isAlive(self, timeout=3):
        '''Check if the process is alive.'''
        self.requireStarted()
delay = 0
while self.proc is None:
if delay > timeout:
break
time.sleep(self.interval)
delay += self.interval
if self.proc is None:
return False
return self.proc.poll() is None
    def isDead(self, pid, timeout=5):
        '''Check if the process was killed.
        This is different from `isAlive` in that its timeout is an expectation
        that the process will die before the timeout, while `isAlive`'s timeout
        is an expectation that the process will be scheduled before the timeout.
        '''
        self.requireStarted()
try:
proc = psutil.Process(pid=pid)
except psutil.NoSuchProcess as e:
return True
delay = 0
while delay < timeout:
if not proc.is_running():
return True
time.sleep(self.interval)
delay += self.interval
return False
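# Usage sketch for ProcRunner with an arbitrary long-running command;
# '/bin/sleep' is an assumption about the host, not an osquery binary.
def _example_proc_runner():
    proc = ProcRunner("sleeper", "/bin/sleep", ["10"], silent=True)
    was_alive = proc.isAlive()
    proc.kill()
    return was_alive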
class ProcessGenerator(object):
'''Helper methods to patch into a unittest'''
generators = []
def setUp(self):
shutil.rmtree(CONFIG_DIR)
os.makedirs(CONFIG_DIR)
def _run_daemon(self, options={}, silent=False, options_only={},
overwrite={}):
'''Spawn an osquery daemon process'''
global ARGS, CONFIG_NAME, CONFIG
config = copy.deepcopy(CONFIG)
config["options"]["database_path"] += str(random.randint(1000, 9999))
config["options"]["extensions_socket"] += str(random.randint(1000, 9999))
for option in options.keys():
config["options"][option] = options[option]
flags = ["--%s=%s" % (k, v) for k, v in config["options"].items()]
for option in options_only.keys():
config["options"][option] = options_only[option]
for key in overwrite:
config[key] = overwrite[key]
utils.write_config(config)
binary = os.path.join(ARGS.build, "osquery", "osqueryd")
daemon = ProcRunner("daemon", binary, flags, silent=silent)
daemon.options = config["options"]
self.generators.append(daemon)
return daemon
def _run_extension(self, timeout=0, path=None, silent=False):
'''Spawn an osquery extension (example_extension)'''
global ARGS, CONFIG
config = copy.deepcopy(CONFIG)
config["options"]["extensions_socket"] += str(random.randint(1000, 9999))
binary = os.path.join(ARGS.build, "osquery", "example_extension.ext")
if path is not None:
config["options"]["extensions_socket"] = path
extension = ProcRunner("extension",
binary,
[
"--socket=%s" % config["options"]["extensions_socket"],
"--verbose" if not silent else "",
"--timeout=%d" % timeout,
"--interval=%d" % 0,
],
silent=silent)
self.generators.append(extension)
extension.options = config["options"]
return extension
def tearDown(self):
'''When the unit test stops, clean up child-generated processes.
Iterate through the generated daemons and extensions, and kill -9 them.
Unittest should stop processes they generate, but on failure the
tearDown method will cleanup.
'''
for generator in self.generators:
if generator.pid is not None:
try:
os.kill(generator.pid, signal.SIGKILL)
except Exception as e:
pass
class EXClient(object):
'''An osquery Thrift/extensions python client generator.'''
transport = None
'''The instance transport object.'''
_manager = None
'''The client class's reference to run-time discovered manager.'''
_client = None
'''The client class's reference to run-time discovered client.'''
    def __init__(self, path=None, uuid=None):
        '''Create an extensions client to a UNIX path and optional UUID.'''
        global CONFIG
if path is None:
path = CONFIG["options"]["extensions_socket"]
self.path = path
if uuid:
self.path += ".%s" % str(uuid)
transport = TSocket.TSocket(unix_socket=self.path)
transport = TTransport.TBufferedTransport(transport)
self.protocol = TBinaryProtocol.TBinaryProtocol(transport)
self.transport = transport
@classmethod
def setUp(cls, manager, client):
'''Set the manager and client modules to generate clients from.'''
cls._manager = manager
cls._client = client
def close(self):
if self.transport:
self.transport.close()
def open(self, timeout=0.1, interval=0.01):
'''Attempt to open the UNIX domain socket.'''
delay = 0
while delay < timeout:
try:
self.transport.open()
return True
except Exception as e:
pass
delay += interval
time.sleep(interval)
return False
def getEM(self):
'''Return an extension manager (osquery core) client.'''
if self._manager is None:
raise(Exception, "The EXClient must be 'setUp' with a manager")
return self._manager.Client(self.protocol)
def getEX(self):
'''Return an extension (osquery extension) client.'''
if self._client is None:
raise(Exception, "The EXClient must be 'setUp' with a client")
return self._client.Client(self.protocol)
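# A minimal sketch of the intended EXClient flow, assuming the thrift
# bindings were registered via loadThriftFromBuild() below and an osqueryd
# is listening on the configured extensions socket.
def _example_extension_ping():
    client = EXClient()
    if not client.open(timeout=1):
        return None
    status = client.getEM().ping()  # ExtensionStatus from the manager
    client.close()
    return status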
class Autoloader(object):
'''Helper class to write a module or extension autoload file.'''
def __init__(self, autoloads=[]):
global CONFIG_DIR
self.path = CONFIG_DIR + "ext.load" + str(random.randint(1000, 9999))
with open(self.path, "w") as fh:
fh.write("\n".join(autoloads))
def __del__(self):
try:
os.unlink(self.path)
except:
pass
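# Sketch: write an autoload file listing one hypothetical extension path;
# the resulting loader.path would typically be handed to osqueryd via the
# extensions_autoload option.
def _example_autoloader():
    loader = Autoloader(["/path/to/example_extension.ext"])
    return loader.path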
class TimeoutRunner(object):
def __init__(self, cmd=[], timeout_sec=1):
self.stdout = None
self.stderr = None
self.proc = subprocess.Popen(" ".join(cmd),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
kill_proc = lambda p: p.kill()
timer = threading.Timer(timeout_sec, kill_proc, [self.proc])
timer.start()
self.stdout, self.stderr = self.proc.communicate()
timer.cancel()
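# Usage sketch for TimeoutRunner: run a shell command but kill it if it
# exceeds the timeout; 'echo' is an arbitrary example command.
def _example_timeout_runner():
    runner = TimeoutRunner(["echo", "hello"], timeout_sec=2)
    return runner.stdout  # b'hello\n' on most shells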
class Tester(object):
def __init__(self):
global ARGS, CONFIG, CONFIG_DIR
parser = argparse.ArgumentParser(description=(
"osquery python integration testing."
))
parser.add_argument(
"--config", metavar="FILE", default=None,
help="Use special options from a config."
)
parser.add_argument(
"--verbose", default=False, action="store_true",
help="Run daemons and extensions with --verbose"
)
# Directory structure options
parser.add_argument(
"--build", metavar="PATH", default=".",
help="Path to osquery build (./build/<sys>/)."
)
ARGS = parser.parse_args()
if not os.path.exists(ARGS.build):
print ("Cannot find --build: %s" % ARGS.build)
print ("You must first run: make")
exit(1)
# Write config
random.seed(time.time())
try:
shutil.rmtree(CONFIG_DIR)
except:
# Allow the tester to fail
pass
os.makedirs(CONFIG_DIR)
        CONFIG = utils.read_config(ARGS.config) if ARGS.config else DEFAULT_CONFIG  # read_config assumed to live in utils
def run(self):
os.setpgrp()
unittest_args = [sys.argv[0]]
if ARGS.verbose:
unittest_args += ["-v"]
unittest.main(argv=unittest_args)
def expect(functional, expected, interval=0.01, timeout=4):
"""Helper function to run a function with expected latency"""
delay = 0
result = None
while result is None or len(result) != expected:
try:
result = functional()
if len(result) == expected:
break
except Exception as e:
print ("Expect exception (%s): %s not %s" % (
str(e), str(functional), expected))
return None
if delay >= timeout:
return None
time.sleep(interval)
delay += interval
return result
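# Sketch of how expect() is meant to be used: poll a callable until it
# returns a collection of the expected length; 'daemon' is a hypothetical
# ProcRunner instance.
def _example_expect(daemon):
    children = expect(daemon.getChildren, 2, timeout=4)
    return children  # None on timeout, else a list of 2 child pids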
class QueryTester(ProcessGenerator, unittest.TestCase):
def setUp(self):
self.binary = os.path.join(ARGS.build, "osquery", "osqueryi")
self.daemon = self._run_daemon({
# The set of queries will hammer the daemon process.
"disable_watchdog": True,
# Enable the 'hidden' flag "registry_exceptions" to prevent catching.
"registry_exceptions": True,
})
self.assertTrue(self.daemon.isAlive())
# The sets of example tests will use the extensions APIs.
self.client = EXClient(self.daemon.options["extensions_socket"])
expectTrue(self.client.open)
self.assertTrue(self.client.open())
self.em = self.client.getEM()
def tearDown(self):
self.client.close()
self.daemon.kill()
def _execute(self, query):
try:
result = self.em.query(query)
self.assertEqual(result.status.code, 0)
return result.response
except Exception as e:
print("General exception executing query: %s" % (
utils.lightred(query)))
raise e
def _execute_set(self, queries):
for example in queries:
start_time = time.time()
result = self._execute(example)
end_time = time.time()
duration_ms = int((end_time - start_time) * 1000)
if duration_ms > 2000:
# Query took longer than 2 seconds.
duration_ms = utils.lightred(duration_ms)
print("Query (%sms): %s, rows: %d" % (
duration_ms, example, len(result)))
def expectTrue(functional, interval=0.01, timeout=8):
"""Helper function to run a function with expected latency"""
delay = 0
while delay < timeout:
if functional():
return True
time.sleep(interval)
delay += interval
return False
def assertPermissions():
stat_info = os.stat('.')
if stat_info.st_uid != os.getuid():
print (utils.lightred("Will not load modules/extensions in tests."))
print (utils.lightred("Repository owner (%d) executer (%d) mismatch" % (
stat_info.st_uid, os.getuid())))
exit(1)
def loadThriftFromBuild(build_dir):
'''Find and import the thrift-generated python interface.'''
thrift_path = build_dir + "/generated/gen-py"
try:
sys.path.append(thrift_path)
sys.path.append(thrift_path + "/osquery")
from osquery import ExtensionManager, Extension
EXClient.setUp(ExtensionManager, Extension)
except ImportError as e:
print ("Cannot import osquery thrift API from %s" % (thrift_path))
print ("Exception: %s" % (str(e)))
print ("You must first run: make")
exit(1)
|
server.py | #Code based on https://github.com/ST0263/st0263-20212/blob/main/LabSocketsMultiThread/ServerLab.py
#!/usr/bin/env python3
import socket
import threading
import constants
import connection
# Defining a socket object...
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server_address = constants.IP_SERVER
def main():
print("***********************************")
print("Server is running (", server_address, ":", constants.PORT, ")...")
server_execution()
# Function to start the server process...
def server_execution():
    tuple_connection = (server_address, constants.PORT)
    # SO_REUSEADDR must be set before bind() to take effect
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(tuple_connection)
    print('Socket is bound to address and port...')
server_socket.listen(5)
print('Socket is listening...')
while True:
client_connection, client_address = server_socket.accept()
client_thread = threading.Thread(target=connection.handler_client_connection, args=(client_connection,client_address))
client_thread.start()
print('Socket is closed...')
server_socket.close()
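# A minimal client-side sketch for exercising this server; it assumes the
# address in constants is reachable and that connection.handler_client_connection
# replies to whatever bytes it receives.
def example_client(message=b"hello"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((server_address, constants.PORT))
        client.sendall(message)
        return client.recv(1024)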
if __name__ == "__main__":
main() |
test_ecu.py |
# for standalone-test
import sys
sys.path.append(".")
import unittest
import time
import threading
try:
# Python27
import Queue as queue
except ImportError:
# Python35
import queue
import j1939
class AcceptAllCA(j1939.ControllerApplication):
"""CA to accept all messages"""
def __init__(self, name, device_address_preferred=None):
        # old-fashioned calling convention for compatibility with Python 2
j1939.ControllerApplication.__init__(self, name, device_address_preferred)
def message_acceptable(self, dest_address):
"""Indicates if this CA would accept a message
        (OVERRIDDEN FUNCTION)
This function indicates the acceptance of this CA for the given dest_address.
"""
return True
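# Sketch of what the override changes: a stock ControllerApplication only
# accepts messages addressed to itself, while AcceptAllCA claims every
# destination address, so the test ECU below sees all traffic.
def _example_accept_all():
    ca = AcceptAllCA(None)
    return ca.message_acceptable(0x42)  # True for any address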
class TestECU(unittest.TestCase):
# TODO: should we change the async_can_feeder to use the can backend with
# bustype 'virtual' instead of injecting our messages directly?
class MsgType(object):
CANRX = 0
CANTX = 1
PDU = 2
def _async_can_feeder(self):
"""Asynchronous feeder"""
while True:
message = self.message_queue.get(block=True)
if message is self.STOP_THREAD:
break
recv_time = message[3]
if recv_time == 0.0:
recv_time = time.time()
self.ecu.notify(message[1], message[2], recv_time)
def _inject_messages_into_ecu(self):
while self.can_messages and self.can_messages[0][0] == TestECU.MsgType.CANRX:
message = self.can_messages.pop(0)
self.message_queue.put(message)
def _send_message(self, can_id, data):
"""Will be used instead of the usual ecu.send_message method.
        Checks the message sent and generates the appropriate answer.
The data is fed from self.can_messages.
"""
expected_data = self.can_messages.pop(0)
self.assertEqual(expected_data[0], TestECU.MsgType.CANTX, "No transmission was expected")
self.assertEqual(can_id, expected_data[1])
self.assertSequenceEqual(data, expected_data[2])
self._inject_messages_into_ecu()
def _on_message(self, pgn, data):
"""Feed incoming message to this testcase.
:param int pgn:
Parameter Group Number of the message
:param bytearray data:
Data of the PDU
"""
expected_data = self.pdus.pop(0)
self.assertEqual(expected_data[0], TestECU.MsgType.PDU)
self.assertEqual(pgn, expected_data[1])
self.assertSequenceEqual(data, expected_data[2])
def setUp(self):
"""Called before each test methode.
Method called to prepare the test fixture. This is called immediately
before calling the test method; other than AssertionError or SkipTest,
any exception raised by this method will be considered an error rather
than a test failure. The default implementation does nothing.
"""
self.can_messages = []
self.pdus = []
self.STOP_THREAD = object()
self.message_queue = queue.Queue()
self.message_thread = threading.Thread(target=self._async_can_feeder)
self.message_thread.start()
self.ecu = j1939.ElectronicControlUnit()
# redirect the send_message from the can bus to our simulation
self.ecu.send_message = self._send_message
# install a fake-CA to accept all messages
ca = AcceptAllCA(None)
self.ecu.add_ca(controller_application = ca)
def tearDown(self):
"""Called after each test methode.
Method called immediately after the test method has been called and
the result recorded. This is called even if the test method raised an
exception, so the implementation in subclasses may need to be
particularly careful about checking internal state. Any exception,
other than AssertionError or SkipTest, raised by this method will be
considered an additional error rather than a test failure (thus
increasing the total number of reported errors). This method will only
be called if the setUp() succeeds, regardless of the outcome of the
test method. The default implementation does nothing.
"""
self.ecu.stop()
self.message_queue.put(self.STOP_THREAD)
self.message_thread.join()
#def test_connect(self):
# self.ecu.connect(bustype="virtual", channel=1)
# self.ecu.disconnect()
def test_broadcast_receive_short(self):
"""Test the receivement of a normal broadcast message
For this test we receive the GFI1 (Fuel Information 1 (Gaseous)) PGN 65202 (FEB2).
Its length is 8 Bytes. The contained values are bogous of cause.
"""
self.can_messages = [
(TestECU.MsgType.CANRX, 0x00FEB201, [1, 2, 3, 4, 5, 6, 7, 8], 0.0),
]
self.pdus = [
(TestECU.MsgType.PDU, 65202, [1, 2, 3, 4, 5, 6, 7, 8]),
]
self.ecu.subscribe(self._on_message)
self._inject_messages_into_ecu()
# wait until all messages are processed asynchronously
while len(self.pdus)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def test_broadcast_receive_long(self):
"""Test the receivement of a long broadcast message
For this test we receive the TTI2 (Trip Time Information 2) PGN 65200 (FEB0).
Its length is 20 Bytes. The contained values are bogous of cause.
"""
self.can_messages = [
(TestECU.MsgType.CANRX, 0x00ECFF01, [32, 20, 0, 3, 255, 0xB0, 0xFE, 0], 0.0), # TP.CM BAM (to global Address)
(TestECU.MsgType.CANRX, 0x00EBFF01, [1, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 1
(TestECU.MsgType.CANRX, 0x00EBFF01, [2, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 2
(TestECU.MsgType.CANRX, 0x00EBFF01, [3, 1, 2, 3, 4, 5, 6, 255], 0.0), # TP.DT 3
]
self.pdus = [
(TestECU.MsgType.PDU, 65200, [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6]),
]
self.ecu.subscribe(self._on_message)
self._inject_messages_into_ecu()
# wait until all messages are processed asynchronously
while len(self.pdus)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def test_peer_to_peer_receive_short(self):
"""Test the receivement of a normal peer-to-peer message
For this test we receive the ATS (Anti-theft Status) PGN 56320 (DC00).
Its length is 8 Bytes. The contained values are bogous of cause.
"""
self.can_messages = [
            (TestECU.MsgType.CANRX, 0x00DC0201, [1, 2, 3, 4, 5, 6, 7, 8], 0.0), # direct 8-byte PDU (no transport protocol)
]
self.pdus = [
            (TestECU.MsgType.PDU, 56320, [1, 2, 3, 4, 5, 6, 7, 8]),
]
self.ecu.subscribe(self._on_message)
self._inject_messages_into_ecu()
# wait until all messages are processed asynchronously
while len(self.pdus)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def test_peer_to_peer_receive_long(self):
"""Test the receivement of a long peer-to-peer message
For this test we receive the TTI2 (Trip Time Information 2) PGN 65200 (FEB0).
Its length is 20 Bytes. The contained values are bogous of cause.
"""
# TODO: we have to select another PGN here! This one is for broadcasting only!
self.can_messages = [
(TestECU.MsgType.CANRX, 0x00EC0201, [16, 20, 0, 3, 1, 176, 254, 0], 0.0), # TP.CM RTS
(TestECU.MsgType.CANTX, 0x1CEC0102, [17, 1, 1, 255, 255, 176, 254, 0], 0.0), # TP.CM CTS 1
(TestECU.MsgType.CANRX, 0x00EB0201, [1, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 1
(TestECU.MsgType.CANTX, 0x1CEC0102, [17, 1, 2, 255, 255, 176, 254, 0], 0.0), # TP.CM CTS 2
(TestECU.MsgType.CANRX, 0x00EB0201, [2, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 2
(TestECU.MsgType.CANTX, 0x1CEC0102, [17, 1, 3, 255, 255, 176, 254, 0], 0.0), # TP.CM CTS 3
(TestECU.MsgType.CANRX, 0x00EB0201, [3, 1, 2, 3, 4, 5, 6, 255], 0.0), # TP.DT 3
(TestECU.MsgType.CANTX, 0x1CEC0102, [19, 20, 0, 3, 255, 176, 254, 0], 0.0), # TP.CM EOMACK
]
self.pdus = [
(TestECU.MsgType.PDU, 65200, [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6]),
]
self.ecu.subscribe(self._on_message)
self._inject_messages_into_ecu()
# wait until all messages are processed asynchronously
while len(self.pdus)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def test_peer_to_peer_send_short(self):
"""Test sending of a short peer-to-peer message
For this test we send the ERC1 (Electronic Retarder Controller 1) PGN 61440 (F000).
        Its length is 8 Bytes. The contained values are bogus, of course.
"""
self.can_messages = [
(TestECU.MsgType.CANTX, 0x18009B90, [1, 2, 3, 4, 5, 6, 7, 8], 0.0), # PGN 61440
]
pdu = (TestECU.MsgType.PDU, 61440, [1, 2, 3, 4, 5, 6, 7, 8])
self.ecu.subscribe(self._on_message)
# sending from 144 to 155 with prio 6
self.ecu.send_pgn(0, pdu[1], 155, 6, 144, pdu[2])
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def test_peer_to_peer_send_long(self):
"""Test sending of a long peer-to-peer message
For this test we send a fantasy message with PGN 57088 (DF00).
Its length is 20 Bytes.
"""
self.can_messages = [
(TestECU.MsgType.CANTX, 0x18EC9B90, [16, 20, 0, 3, 255, 155, 0, 0], 0.0), # TP.CM RTS 1
(TestECU.MsgType.CANRX, 0x1CEC909B, [17, 1, 1, 255, 255, 155, 0, 0], 0.0), # TP.CM CTS 1
(TestECU.MsgType.CANTX, 0x1CEB9B90, [1, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 1
(TestECU.MsgType.CANRX, 0x1CEC909B, [17, 1, 2, 255, 255, 155, 0, 0], 0.0), # TP.CM CTS 2
(TestECU.MsgType.CANTX, 0x1CEB9B90, [2, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 2
(TestECU.MsgType.CANRX, 0x1CEC909B, [17, 1, 3, 255, 255, 155, 0, 0], 0.0), # TP.CM CTS 3
(TestECU.MsgType.CANTX, 0x1CEB9B90, [3, 1, 2, 3, 4, 5, 6, 255], 0.0), # TP.DT 3
(TestECU.MsgType.CANRX, 0x1CEC909B, [19, 20, 0, 3, 255, 155, 0, 0], 0.0), # TP.CM EOMACK
]
pdu = (TestECU.MsgType.PDU, 57088, [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6])
self.ecu.subscribe(self._on_message)
# sending from 144 to 155 with prio 6
self.ecu.send_pgn(0, pdu[1], 155, 6, 144, pdu[2])
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def test_broadcast_send_long(self):
"""Test sending of a long broadcast message (with BAM)
For this test we use the TTI2 (Trip Time Information 2) PGN 65200 (FEB0).
        Its length is 20 Bytes. The contained values are bogus, of course.
"""
self.can_messages = [
(TestECU.MsgType.CANTX, 0x18ECFF90, [32, 20, 0, 3, 255, 255, 176, 0], 0.0), # TP.BAM
(TestECU.MsgType.CANTX, 0x1CEBFF90, [1, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 1
(TestECU.MsgType.CANTX, 0x1CEBFF90, [2, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 2
(TestECU.MsgType.CANTX, 0x1CEBFF90, [3, 1, 2, 3, 4, 5, 6, 255], 0.0), # TP.DT 3
]
pdu = (TestECU.MsgType.PDU, 65200, [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6])
self.ecu.subscribe(self._on_message)
        # sending from 144 to GLOBAL with prio 6
self.ecu.send_pgn(0, pdu[1], j1939.ParameterGroupNumber.Address.GLOBAL, 6, 144, pdu[2])
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
if __name__ == '__main__':
unittest.main()
|
test.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import model as M
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if world_size > 1:
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = getattr(M, args.arch)(pretrained=(args.model is None))
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("Valid %.3f / %.3f", valid_acc, valid_acc5)
def infer(model, data_queue, args, epoch=0):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logger.info(
"Epoch %d Step %d, %s %s %s %s",
epoch,
step,
objs,
top1,
top5,
total_time,
)
return objs.avg, top1.avg, top5.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
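# Usage sketch for AverageMeter with made-up loss values: val tracks the
# latest update while avg is the running, count-weighted mean.
def _example_average_meter():
    meter = AverageMeter("Loss")
    meter.update(0.5, n=10)
    meter.update(0.3, n=10)
    return str(meter)  # 'Loss 0.300 (0.400)'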
if __name__ == "__main__":
main()
|
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only, unlink
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
import traceback
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
@test.support.cpython_only
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
@test.support.cpython_only
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
newgil = hasattr(sys, 'getswitchinterval')
if newgil:
geti, seti = sys.getswitchinterval, sys.setswitchinterval
else:
geti, seti = sys.getcheckinterval, sys.setcheckinterval
old_interval = geti()
try:
for i in range(1, 100):
seti(i * 0.0002 if newgil else i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
seti(old_interval)
@test.support.cpython_only
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
def test_fork_at_exit(self):
# bpo-42350: Calling os.fork() after threading._shutdown() must
# not log an error.
code = textwrap.dedent("""
import atexit
import os
import sys
from test.support import wait_process
# Import the threading module to register its "at fork" callback
import threading
def exit_handler():
pid = os.fork()
if not pid:
print("child process ok", file=sys.stderr, flush=True)
# child process
sys.exit()
else:
wait_process(pid, exitcode=0)
# exit_handler() will be called after threading._shutdown()
atexit.register(exit_handler)
""")
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err.rstrip(), b'child process ok')
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
newgil = hasattr(sys, 'getswitchinterval')
if newgil:
geti, seti = sys.getswitchinterval, sys.setswitchinterval
else:
geti, seti = sys.getcheckinterval, sys.setcheckinterval
old_interval = geti()
self.addCleanup(seti, old_interval)
# Make the bug more likely to manifest.
seti(1e-6 if newgil else 1)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@test.support.cpython_only
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
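# Both locks start out held: the worker releases 'started' once it is
# running, then blocks on 'finish' until the test lets it exit.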
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0))
finish.release()
# When the thread ends, the tstate lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT))
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
@cpython_only
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with support.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
def test_import_from_another_thread(self):
# bpo-1596321: If the threading module is first imported from a thread
# other than the main thread, threading._shutdown() must handle
# this case without logging an error at Python exit.
code = textwrap.dedent('''
import _thread
import sys
event = _thread.allocate_lock()
event.acquire()
def import_threading():
import threading
event.release()
if 'threading' in sys.modules:
raise Exception('threading is already imported')
_thread.start_new_thread(import_threading, ())
# wait until the threading module is imported
event.acquire()
event.release()
if 'threading' not in sys.modules:
raise Exception('threading is not imported')
# don't wait until the thread completes
''')
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
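# Make the read end non-blocking (where supported) so a test failure
# surfaces as an error from os.read() instead of hanging forever.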
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
@cpython_only
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
def test_multithread_modify_file_noerror(self):
# See issue25872
def modify_file():
with open(test.support.TESTFN, 'w', encoding='utf-8') as fp:
fp.write(' ')
traceback.format_stack()
self.addCleanup(unlink, test.support.TESTFN)
threads = [
threading.Thread(target=modify_file)
for i in range(100)
]
for t in threads:
t.start()
t.join()
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Running interrupt_main() from a subthread should cause the main
# thread to raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
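# The first join() runs inside assertRaises: interrupt_main() makes the
# main thread raise KeyboardInterrupt at its next check, typically while
# blocked in join(); the second join() then waits for the worker to exit.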
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
main.py | from flask import Flask, request
import os
import asyncio
import threading
import nest_asyncio
from openleadr.client import OpenADRClient
from openleadr.utils import report_callback
from openleadr.enums import MEASUREMENTS
nest_asyncio.apply()
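# nest_asyncio patches asyncio to allow re-entrant event loops, which lets
# Flask's async views and the OpenADR client coexist in one process.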
client = OpenADRClient(ven_name='myven', vtn_url=os.environ.get('VTN_URL'))
client.add_report(report_callback, client.ven_id, report_name='TELEMETRY_STATUS')
client.add_report(report_callback, client.ven_id, report_name='TELEMETRY_USAGE', measurement=MEASUREMENTS.POWER_REAL)
app = Flask(__name__)
@app.route('/create_party_registration', methods=['POST', 'GET'])
async def create_party_registration():
await client.create_party_registration(ven_id=client.ven_id, registration_id=client.registration_id)
return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/create_party_registration_while_registered', methods=['POST', 'GET'])
async def create_party_registration_while_registered():
await client.create_party_registration_while_registered()
return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/query_registration', methods=['POST'])
async def query_registration():
await client.query_registration()
return {'status': 200, 'body': 'return from the query registration'}
@app.route('/cancel_party_registration', methods=['POST'])
async def cancel_party_registration():
await client.cancel_party_registration()
return {'status': 200, 'body': 'return from the cancel registration'}
@app.route('/register_reports')
async def register_reports():
if client.reports:
await client.register_reports(client.reports)
return {'status': 200, 'body': 'The VEN has sent register report with metadata.'}
@app.route('/request_event', methods=['POST'])
async def request_event():
response_type, response_payload = await client.request_event()
if response_type == 'oadrDistributeEvent':
if 'events' in response_payload and len(response_payload['events']) > 0:
await client._on_event(response_payload)
return {'status': 200, 'body': 'return from the request event'}
@app.route('/create_opt', methods=['POST'])
async def create_opt():
return await client.create_opt(request.data)
@app.route('/cancel_opt', methods=['POST'])
async def cancel_opt():
return await client.cancel_opt(request.data)
def client_run():
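# Run the OpenADR client forever on a dedicated event loop in this
# background thread, separate from Flask's server thread.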
loop = asyncio.new_event_loop()
loop.create_task(client.run())
loop.run_forever()
if __name__ == "__main__":
t1 = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': os.environ.get('PORT')})
t2 = threading.Thread(target=client_run)
t1.start()
t2.start()
t2.join() |
killThreadTest.py | import os
import sys
import time
import threading
from signal import signal, SIGINT
import subprocess
import multiprocessing
threads = 10
threadL=[]
def shareOrder(idx):
subprocess.Popen(["script -c \"~/onionshare/dev_scripts/onionshare --website hello.txt\" -f output" + str(idx) + ".txt"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
def create():
for i in range(0,threads):
thread = multiprocessing.Process(target=shareOrder,args=(i,))
thread.start()
threadL.append(thread)
def ex():
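# After 30s: terminate every worker process, restart each one, then
# terminate them again, logging each step's outcome to output.txt.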
time.sleep(30)
f = open("output.txt", 'w')
for i in range(0,threads):
try:
threadL[i].terminate()
f.write("thread terminate pass:" + str(i) + "\n")
except Exception:
f.write("thread terminate failure:" + str(i) + "\n")
for i in range(0,threads):
try:
thread = multiprocessing.Process(target=shareOrder,args=(i,))
thread.start()
threadL[i] = thread
f.write("thread restart pass:" + str(i) + "\n")
except Exception:
f.write("thread restart failure:" + str(i) + "\n")
for i in range(0,threads):
try:
threadL[i].terminate()
f.write("thread terminate 2 pass:" + str(i) + "\n")
except Exception:
f.write("thread terminate 2 failure:" + str(i) + "\n")
print("I am still working")
f.close()
def loop():
while True:
print("hello")
time.sleep(5)
def ps():
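# Scrape `ps aux` output and kill any process whose command line
# mentions onionshare.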
os.system("reset")
os.system("ps aux > ps.txt")
f = open("ps.txt", 'r')
line = f.readline()
while line != '':
if line.find('onionshare') != -1:
os.system('kill ' + line.split()[1])
line = f.readline()
f.close()
def runner():
print("creating threads")
create()
print("starting looper")
l = multiprocessing.Process(target=loop)
l.start()
print("ex")
ex()
print("stopping loop")
try:
l.terminate()
print("loop ender passes")
except:
print("loop ender failure")
time.sleep(10)
print("ps")
ps()
runner()
|
deploy_contrail_vm.py | #!/usr/bin/env python
from os import system, path
from sys import exit
from threading import Thread
from time import sleep
from argparse import ArgumentParser
from getpass import getpass
import tarfile
import urllib2
from pyVim import connect
from pyVmomi import vim
from manage_dvs_pg import get_obj
def get_args():
"""
Get CLI arguments.
"""
parser = ArgumentParser(description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSphere service to connect to.')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on.')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='Username to use.')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use.')
parser.add_argument('--datacenter_name',
required=False,
action='store',
default=None,
help='Name of the Datacenter you\
wish to use. If omitted, the first\
datacenter will be used.')
parser.add_argument('--datastore_name',
required=False,
action='store',
default=None,
help='Datastore you wish the VM to be deployed to. \
If left blank, VM will be put on the first \
datastore found.')
parser.add_argument('--cluster_name',
required=False,
action='store',
default=None,
help='Name of the cluster you wish the VM to\
end up on. If left blank the first cluster found\
will be used')
parser.add_argument('-f', '--ova_path',
required=True,
action='store',
default=None,
help='Path of the OVA file to deploy.')
parser.add_argument('--host_name', required=True, action='store', help='Name of the host to launch VM')
parser.add_argument('--vm_name', required=True, action='store', help='Name of the VM')
args = parser.parse_args()
if not args.password:
args.password = getpass(prompt='Enter password: ')
return args
def get_ovf_descriptor(ovf_file):
"""
Read in the OVF descriptor.
"""
try:
ovfd = ovf_file.read()
return ovfd
except Exception:
print "Could not read file: %s" % ovf_file
exit(1)
def get_obj_in_list(obj_name, obj_list):
"""
Gets an object out of a list (obj_list) whos name matches obj_name.
"""
for o in obj_list:
if o.name == obj_name:
return o
print ("Unable to find object by the name of %s in list:\n%s" %
(o.name, map(lambda o: o.name, obj_list)))
exit(1)
def get_objects(si, args):
"""
Return a dict containing the necessary objects for deployment.
"""
# Get datacenter object.
datacenter_list = si.content.rootFolder.childEntity
if args.datacenter_name:
datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)
else:
datacenter_obj = datacenter_list[0]
# Get datastore object.
datastore_list = datacenter_obj.datastoreFolder.childEntity
if args.datastore_name:
datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)
elif len(datastore_list) > 0:
datastore_obj = datastore_list[0]
else:
print "No datastores found in DC (%s)." % datacenter_obj.name
# Get cluster object.
cluster_list = datacenter_obj.hostFolder.childEntity
if args.cluster_name:
cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)
elif len(cluster_list) > 0:
cluster_obj = cluster_list[0]
else:
print "No clusters found in DC (%s)." % datacenter_obj.name
# Get host object
hosts_list = cluster_obj.host
host_obj = get_obj_in_list(args.host_name, hosts_list)
# Get VM object
# Generate resource pool.
resource_pool_obj = cluster_obj.resourcePool
return {"datacenter": datacenter_obj,
"datastore": datastore_obj,
"resource pool": resource_pool_obj,
"host_obj": host_obj}
def keep_lease_alive(lease):
"""
Keeps the lease alive while POSTing the VMDK.
"""
while True:
sleep(5)
try:
# Choosing arbitrary percentage to keep the lease alive.
lease.HttpNfcLeaseProgress(50)
if (lease.state == vim.HttpNfcLease.State.done):
return
# If the lease is released, we get an exception.
# Returning to kill the thread.
except:
return
def auto_start_vm(si, args, vm_obj):
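# Enable the ESXi host's auto-start manager and register the VM so it is
# powered on automatically (first in the start order) when the host boots.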
si_content = si.RetrieveContent()
objs = get_objects(si, args)
host = objs['host_obj']
vm_obj = get_obj(si_content, [vim.VirtualMachine], args.vm_name)
host_settings = vim.host.AutoStartManager.SystemDefaults()
host_settings.enabled = True
config = host.configManager.autoStartManager.config
config.defaults = host_settings
auto_power_info = vim.host.AutoStartManager.AutoPowerInfo()
auto_power_info.key = vm_obj
auto_power_info.startOrder = 1
auto_power_info.startAction = "powerOn"
auto_power_info.startDelay = -1
auto_power_info.stopAction = "powerOff"
auto_power_info.stopDelay = -1
auto_power_info.waitForHeartbeat = 'no'
config.powerInfo = [auto_power_info]
host.configManager.autoStartManager.ReconfigureAutostart(config)
def main():
args = get_args()
t = tarfile.open(args.ova_path)
ovffilename = list(filter(lambda x: x.endswith(".ovf"), t.getnames()))[0]
ovffile = t.extractfile(ovffilename)
ovfd = get_ovf_descriptor(ovffile)
ovffile.close()
try:
ssl = __import__("ssl")
context = ssl._create_unverified_context()
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=args.port,
sslContext=context)
except Exception:
# Retry without an explicit SSL context; if that also fails, bail out.
try:
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
except Exception:
print "Unable to connect to %s" % args.host
exit(1)
objs = get_objects(si, args)
# if VM already exists exit right away
si_content = si.RetrieveContent()
vm_obj = get_obj(si_content, [vim.VirtualMachine], args.vm_name)
if vm_obj:
print "vm %s already exists" %args.vm_name
exit(0)
manager = si.content.ovfManager
spec_params = vim.OvfManager.CreateImportSpecParams()
# update spec_params to include host and name of the VM and possibly nics
spec_params.hostSystem = objs['host_obj']
spec_params.diskProvisioning = "thick"
spec_params.entityName = args.vm_name
import_spec = manager.CreateImportSpec(ovfd,
objs["resource pool"],
objs["datastore"],
spec_params)
lease = objs["resource pool"].ImportVApp(import_spec.importSpec,
objs["datacenter"].vmFolder,
objs['host_obj'])
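# ImportVApp returns an HTTP NFC lease; once the lease reaches the
# 'ready' state, the disk images can be POSTed to the device URLs it
# exposes.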
while True:
if lease.state == vim.HttpNfcLease.State.ready:
# Spawn a thread to keep the lease active while POSTing
# VMDK.
keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
keepalive_thread.daemon = True
keepalive_thread.start()
try:
for deviceUrl in lease.info.deviceUrl:
url = deviceUrl.url.replace('*', args.host)
fileItem = list(filter(lambda x: x.deviceId ==
deviceUrl.importKey,
import_spec.fileItem))[0]
ovffilename = list(filter(lambda x: x == fileItem.path,
t.getnames()))[0]
ovffile = t.extractfile(ovffilename)
headers = {'Content-length': ovffile.size}
req = urllib2.Request(url, ovffile, headers)
try:
response = urllib2.urlopen(req, context = context)
except Exception:  # 'context' may be undefined if the unverified-SSL setup failed
response = urllib2.urlopen(req)
lease.HttpNfcLeaseComplete()
except:
raise
keepalive_thread.join()
auto_start_vm(si, args, vm_obj)
connect.Disconnect(si)
return 0
elif lease.state == vim.HttpNfcLease.State.error:
print "Lease error: %s" % lease.error
connect.Disconnect(si)
exit(1)
if __name__ == "__main__":
exit(main())
|
main.py | # MIT License
# Copyright (c) 2021 SUBIN
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
import asyncio
from pyrogram import Client, idle, filters
import os
from config import Config
from utils import mp, USERNAME, FFMPEG_PROCESSES
from pyrogram.raw import functions, types
import os
import sys
from time import sleep
from threading import Thread
from signal import SIGINT
import subprocess
except ModuleNotFoundError:
import os
import sys
import subprocess
file = os.path.abspath("requirements.txt")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", file, "--upgrade"]
)
os.execl(sys.executable, sys.executable, *sys.argv)
CHAT = Config.CHAT
bot = Client(
"Musicplayer",
Config.API_ID,
Config.API_HASH,
bot_token=Config.BOT_TOKEN,
plugins=dict(root="plugins"),
)
if not os.path.isdir("./downloads"):
os.makedirs("./downloads")
async def main():
async with bot:
await mp.start_radio()
def stop_and_restart():
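# Stop the client, pull the latest code, then re-exec this interpreter
# in place (preserving argv) so the bot restarts on the updated source.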
bot.stop()
os.system("git pull")
sleep(10)
os.execl(sys.executable, sys.executable, *sys.argv)
bot.run(main())
bot.start()
@bot.on_message(
filters.command(["restart", f"restart@{USERNAME}"])
& filters.user(Config.ADMINS)
& (filters.chat(CHAT) | filters.private)
)
async def restart(client, message):
await message.reply_text("🔄 Updating and Restarting...")
await asyncio.sleep(3)
try:
await message.delete()
except Exception:
pass
process = FFMPEG_PROCESSES.get(CHAT)
if process:
try:
process.send_signal(SIGINT)
except subprocess.TimeoutExpired:
process.kill()
except Exception as e:
print(e)
pass
FFMPEG_PROCESSES[CHAT] = ""
Thread(target=stop_and_restart).start()
bot.send(
functions.bots.SetBotCommands(
commands=[
types.BotCommand(command="start", description="Check if bot alive"),
types.BotCommand(command="help", description="Shows help message"),
types.BotCommand(
command="play", description="Play song from youtube/audiofile"
),
types.BotCommand(
command="splay",
description="Play song from JioSaavn, use -a flag to play an album.",
),
types.BotCommand(
command="cplay", description="Plays music files from a channel."
),
types.BotCommand(
command="yplay",
description="Plays music files from a youtube playlist.",
),
types.BotCommand(
command="player", description="Shows current playing song with controls"
),
types.BotCommand(command="playlist", description="Shows the playlist"),
types.BotCommand(
command="clearplaylist", description="Clears the current playlist"
),
types.BotCommand(command="shuffle", description="Shuffle the playlist"),
types.BotCommand(
command="export",
description="Export current playlist as json file for future use.",
),
types.BotCommand(
command="import", description="Import a previously exported playlist."
),
types.BotCommand(
command="upload",
description="Upload current playing song as audio file.",
),
types.BotCommand(command="skip", description="Skip the current song"),
types.BotCommand(command="join", description="Join VC"),
types.BotCommand(command="leave", description="Leave from VC"),
types.BotCommand(command="vc", description="Ckeck if VC is joined"),
types.BotCommand(command="stop", description="Stops Playing"),
types.BotCommand(command="radio", description="Start radio / Live stream"),
types.BotCommand(command="stopradio", description="Stops radio/Livestream"),
types.BotCommand(command="replay", description="Replay from beggining"),
types.BotCommand(command="clean", description="Cleans RAW files"),
types.BotCommand(command="pause", description="Pause the song"),
types.BotCommand(command="resume", description="Resume the paused song"),
types.BotCommand(command="mute", description="Mute in VC"),
types.BotCommand(command="volume", description="Set volume between 0-200"),
types.BotCommand(command="unmute", description="Unmute in VC"),
types.BotCommand(
command="restart", description="Update and restart the bot"
),
]
)
)
idle()
bot.stop()
|
scheduler_job.py | # pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import ExitStack, redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
import tenacity
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest, DagCallbackRequest, SlaCallbackRequest, TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of Scheduler.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: None; the result is passed back via ``result_channel``
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with ExitStack() as exit_stack:
exit_stack.enter_context(redirect_stdout(StreamLogWriter(log, logging.INFO))) # type: ignore
exit_stack.enter_context(redirect_stderr(StreamLogWriter(log, logging.WARN))) # type: ignore
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""
Launch the process and start processing the DAG.
"""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
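# duplex=False yields a one-way pipe: the child sends results on
# _child_channel and the parent only receives on _parent_channel.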
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
"DagFileProcessor{}".format(self._instance_id),
self._callback_requests
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running SchedulerJob.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
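# --- Illustrative usage sketch (not part of the original module) ----------------
# A minimal sketch, assuming the constructor shape used by
# SchedulerJob._create_dag_file_processor further below. The polling interval and
# 60-second deadline are arbitrary choices for the example; the helper is never
# called from this module.
def _example_drive_processor(file_path: str) -> Optional[Tuple[int, int]]:
    """Drive a single DagFileProcessorProcess from start to result."""
    processor = DagFileProcessorProcess(
        file_path=file_path,
        pickle_dags=False,
        dag_ids=None,
        callback_requests=[],
    )
    processor.start()
    deadline = time.time() + 60  # arbitrary timeout for the sketch
    while not processor.done:  # polls the parent end of the result pipe
        if time.time() > deadline:
            processor.terminate(sigkill=True)  # SIGTERM, then SIGKILL after ~5s
            return None
        time.sleep(0.1)
    # result is the (num DAGs found, num import errors) tuple sent by the child
    return processor.result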
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
process_file() returns the number of DAGs found in the file and the count of
import errors.
:param dag_ids: If specified, only look at these DAG IDs
:type dag_ids: List[str]
:param log: Logger to use during file processing
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Find all tasks that have SLAs defined and send alert emails when needed.
New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis: List[TI] = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas: List[SlaMiss] = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}</code></pre>
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
f"[airflow] SLA miss on DAG={dag.dag_id}",
email_content
)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, records any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
@provide_session
def execute_callbacks(
self,
dagbag: DagBag,
callback_requests: List[CallbackRequest],
session: Session = None
) -> None:
"""
Execute on-failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run,
success=not request.is_failure_callback,
reason=request.msg,
session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns the number of DAGs found in the file and the count of import errors.
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.read_dags_from_db = True
# Retry 'dagbag.sync_to_db()' in case of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in tenacity.Retrying(
retry=tenacity.retry_if_exception_type(exception_types=OperationalError),
wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
before_sleep=tenacity.before_sleep_log(self.log, logging.DEBUG),
reraise=True
):
with attempt:
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES
)
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
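# --- Illustrative usage sketch (not part of the original module) ----------------
# A minimal sketch of invoking DagFileProcessor synchronously, which is what the
# child process launched by DagFileProcessorProcess ultimately does. The logger
# name is arbitrary for the example; the helper is never called from this module.
def _example_process_one_file(file_path: str) -> Tuple[int, int]:
    processor = DagFileProcessor(dag_ids=None, log=logging.getLogger("example"))
    # Returns (number of DAGs found, count of import errors).
    return processor.process_file(file_path, callback_requests=[], pickle_dags=False)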
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: Any = None,
*args, **kwargs):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(read_dags_from_db=True)
def register_exit_signals(self) -> None:
"""
Register signals that stop child processes
"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as being in the RUNNING state with a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: bool
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self,
old_states: List[str],
new_state: str,
session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.TaskInstance.dag_run) \
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids))) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None))) # pylint: disable=no-member
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(query).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update({
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
})
tis_changed = session \
.query(models.TaskInstance) \
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date) \
.update(ti_prop_update, synchronize_session=False)
session.flush()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
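# Illustrative sketch (doctest-style, an assumption for clarity): how the two
# maps above aggregate the (task_id, dag_id, count) rows from the grouped query.
#   >>> rows = [("t1", "dag_a", 2), ("t2", "dag_a", 1), ("t1", "dag_b", 4)]
#   >>> dag_map, task_map = defaultdict(int), defaultdict(int)
#   >>> for task_id, dag_id, count in rows:
#   ...     dag_map[dag_id] += count
#   ...     task_map[(dag_id, task_id)] = count
#   >>> dag_map["dag_a"], task_map[("dag_b", "t1")]
#   (3, 4)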
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session
.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None),
DR.run_type != DagRunType.BACKFILL_JOB.value))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so we avoid loading the full
# serialized DAG where we can.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if task_instance.pool_slots > open_slots:
self.log.info("Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance, task_instance.pool_slots, open_slots, pool)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
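# Illustrative sketch (doctest-style, an assumption for clarity): the sort key
# used above orders task instances by descending priority_weight, breaking ties
# with the earliest execution_date.
#   >>> from collections import namedtuple
#   >>> FakeTI = namedtuple("FakeTI", "priority_weight execution_date")
#   >>> tis = [FakeTI(1, "d2"), FakeTI(3, "d2"), FakeTI(3, "d1")]
#   >>> [ti.execution_date for ti in
#   ...  sorted(tis, key=lambda ti: (-ti.priority_weight, ti.execution_date))]
#   ['d1', 'd2', 'd2']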
def _enqueue_task_instances_with_queued_state(
self,
task_instances: List[TI]
) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
ti.key, priority, queue
)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); for DBs that don't support this (such as
MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instances with state changed.
"""
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
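# Note (an explanatory sketch, not original code): the critical section above is
# guarded by the row locks taken inside _executable_task_instances_to_queued via
#   pools = models.Pool.slots_stats(lock_rows=True, session=session)
# which issues ``SELECT ... FOR UPDATE`` on the pool rows. On backends with
# NOWAIT support, a second scheduler fails fast with a "lock not available"
# OperationalError instead of blocking; _do_scheduling below catches that and
# treats it as "another scheduler holds the critical section".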
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return the raw try_number+1 since the
# ti is not running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query).all()
if not tis_to_set_to_scheduled:
return
# set TIs back to the scheduled state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""
Respond to executor events.
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create a map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id, ti_key.task_id, ti_key.execution_date, state, ti_key.try_number
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = "Executor reports task instance %s finished (%s) although the " \
"task says its %s. (Info: %s) Was the task killed externally?"
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.adopt_or_reset_orphaned_tasks()
self.register_exit_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool
) -> DagFileProcessorProcess:
"""
Creates DagFileProcessorProcess instance.
"""
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_ids=dag_ids,
callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
for loop_count in itertools.count(start=1):
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
self._emit_pool_metrics()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug("Ran scheduling loop in %.2f seconds", loop_duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(self._processor_poll_interval)
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs, loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d "
" scheduler loops",
self.num_times_parse_dags, loop_count,
)
break
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take place. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time-consuming process, we select only 10 dags by default
(configurable via the ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and never get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via the ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean the runs that have gone the longest without being examined/scheduled.
The reason we don't select all dagruns at once is that the rows are selected with row locks, meaning
that only one scheduler can "process them", even if it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 task) DAGs.
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer-term fix would be to have `clear` do this, and put DagRuns
# into the queued state, then take DRs out of queued before creating
# any new ones
# TODO[HA]: Why is this on TI, not on DagRun??
currently_active_runs = dict(session.query(
TI.dag_id,
func.count(TI.execution_date.distinct()),
).filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished))
).group_by(TI.dag_id).all())
for dag_run in dag_runs:
self._schedule_dag_run(dag_run, currently_active_runs.get(dag_run.dag_id, 0), session)
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
# TODO[HA]: Do we need to do it every time?
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY],
new_state=State.FAILED,
session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE,
State.SENSING],
new_state=State.NONE,
session=session
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find any TIs in state SCHEDULED, try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock, otherwise we'll skew the
# metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
return num_queued_tis
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all)
active_runs_of_dags = dict(session.query(DagRun.dag_id, func.count('*')).filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
).group_by(DagRun.dag_id).all())
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id, active_runs_of_dag, dag.max_active_runs
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = \
dag.next_dagrun_info(dag_model.next_dagrun)
def _schedule_dag_run(self, dag_run: DagRun, currently_active_runs: int, session: Session) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Number of currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error(
"Couldn't find dag %s in DagBag/DB!", dag_run.dag_id
)
return 0
if (
dag_run.start_date and dag.dagrun_timeout and
dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.state = State.FAILED
dag_run.end_date = timezone.utcnow()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
session.flush()
# Work out whether we should allow creating a new DagRun now
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out'
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
dag_run.execution_date
)
return 0
if dag.max_active_runs:
if currently_active_runs >= dag.max_active_runs:
self.log.info(
"DAG %s already has %d active runs, not queuing any more tasks",
dag.dag_id,
currently_active_runs,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# Get the list of TIs that do not need to be executed; these are
# tasks using DummyOperator without an on_execute_callback / on_success_callback
dummy_tis = [
ti for ti in schedulable_tis
if
(
ti.task.task_type == "DummyOperator"
and not ti.task.on_execute_callback
and not ti.task.on_success_callback
)
]
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
count = session.query(TI).filter(
TI.dag_id == dag_run.dag_id,
TI.execution_date == dag_run.execution_date,
TI.task_id.in_(ti.task_id for ti in schedulable_tis if ti not in dummy_tis)
).update({TI.state: State.SCHEDULED}, synchronize_session=False)
# Tasks using DummyOperator should not be executed, mark them as success
if dummy_tis:
session.query(TI).filter(
TI.dag_id == dag_run.dag_id,
TI.execution_date == dag_run.execution_date,
TI.task_id.in_(ti.task_id for ti in dummy_tis)
).update({
TI.state: State.SUCCESS,
TI.start_date: timezone.utcnow(),
TI.end_date: timezone.utcnow(),
TI.duration: 0
}, synchronize_session=False)
return count
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self,
dag_run: DagRun,
callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc,
dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
num_failed = session.query(SchedulerJob).filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout))
).update({"state": State.FAILED})
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI).filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre-upgrade. This (and the
# "or queued_by_job_id IS NULL") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(DagRun.run_type != DagRunType.BACKFILL_JOB.value,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info("Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset), task_instance_str)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
semaphore.py | import threading
import time
# For signaling, the semaphore is initialized to 0; for mutual exclusion,
# the initial value is 1; for multiplexing, the initial value is a positive
# number greater than 1.
semaphore = threading.Semaphore(0)
def consumer():
print("The begining of consumer function")
semaphore.acquire()
print(f"consumer got notified: the value is {item}")
print("The end of consumer function")
def producer():
global item
print("The begining of producer function")
time.sleep(1)
item = 100
print(f"producer send's the value {item}")
semaphore.release()
print("The end of producer function")
if __name__ == "__main__":
producer_thread = threading.Thread(target=producer)
consumer_thread = threading.Thread(target=consumer)
for thread in (producer_thread, consumer_thread):
thread.start()
for thread in (producer_thread, consumer_thread):
thread.join()
print("The end of main function")
|
test_debug.py | import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from unittest import mock
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.http import Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.urls.converters import IntConverter
from django.utils.functional import SimpleLazyObject
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_safe
from django.views.debug import (
CallableSettingWrapper, ExceptionCycleWarning, ExceptionReporter,
Path as DebugPath, SafeExceptionReporterFilter, default_urlconf,
get_default_exception_reporter_filter, technical_404_response,
technical_500_response,
)
from django.views.decorators.debug import (
sensitive_post_parameters, sensitive_variables,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.request', 'WARNING') as cm:
response = self.client.get('/raises400_bad_request/')
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
'Malformed request syntax: /raises400_bad_request/',
)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(
response,
'<p>The current path, <code>not-in-urls</code>, didn’t match any '
'of these.</p>',
status_code=404,
html=True,
)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(
response,
'<p>The current path, <code>not-in-urls</code>, didn’t match any '
'of these.</p>',
status_code=404,
html=True,
)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(
response,
'<p>The empty path didn’t match any of these.</p>',
status_code=404,
html=True,
)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
self.assertContains(
response,
'<p>The current path, <code>technical404/</code>, matched the '
'last one.</p>',
status_code=404,
html=True,
)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
Numeric IDs and line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match['id']
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
        should be opened with the utf-8 charset, the default specified on
        template engines.
"""
with mock.patch.object(DebugPath, 'open') as m:
default_urlconf(None)
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding='utf-8')
def test_technical_404_converter_raise_404(self):
with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):
response = self.client.get('/path-post/1/')
self.assertContains(response, 'Page not found', status_code=404)
def test_exception_reporter_from_request(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/custom_reporter_class_view/')
self.assertContains(response, 'custom traceback text', status_code=500)
@override_settings(DEFAULT_EXCEPTION_REPORTER='view_tests.views.CustomExceptionReporter')
def test_exception_reporter_from_settings(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
self.assertContains(response, 'custom traceback text', status_code=500)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.request', 'WARNING') as cm:
response = self.client.get('/raises400_bad_request/')
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
'Malformed request syntax: /raises400_bad_request/',
)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_suppressed_context(self):
try:
try:
raise RuntimeError("Can't find my keys")
except RuntimeError:
raise ValueError("Can't find my keys") from None
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
self.assertNotIn('During handling of the above exception', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
        self.assertEqual(2, html.count(explicit_exc.format('&lt;p&gt;Top level&lt;/p&gt;')))
        self.assertEqual(2, html.count(implicit_exc.format('&lt;p&gt;Second exception&lt;/p&gt;')))
        self.assertEqual(10, html.count('&lt;p&gt;Final exception&lt;/p&gt;'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_source_not_match(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
with mock.patch(
'django.views.debug.ExceptionReporter._get_source',
return_value=['wrong source'],
):
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
msg = (
"Cycle in the exception chain detected: exception 'inner' "
"encountered again."
)
with self.assertWarnsMessage(ExceptionCycleWarning, msg):
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
            # Can happen if the thread generating the traceback was killed,
            # or if an exception occurred while the traceback was generated.
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
        self.assertIn('<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
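        # len(repr('')) == 2: the pair of quotes repr() adds around a string,
        # which count toward the byte total reported in the trimmed marker.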
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fits in 128 KB
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
        self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
        value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
        should be opened with the utf-8 charset, the default specified on
        template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, 'open') as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding='utf-8')
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parents[1], 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
    # Mixin used in the ExceptionReporterFilterTests and
    # NonHTMLResponseExceptionReporterFilter tests below.
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
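    # Judging by the assertions below, the views under test mark the 'sausage'
    # and 'bacon' parameters as sensitive, while 'baked-beans' and 'hash-brown'
    # act as non-sensitive controls that must stay visible.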
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
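    # The views exercised below are decorated with Django's debug decorators,
    # roughly along these lines (an illustrative sketch only; the real views
    # live in the view_tests test app):
    #
    #     @sensitive_variables('sauce')
    #     @sensitive_post_parameters('sausage-key', 'bacon-key')
    #     def sensitive_view(request):
    #         ...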
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
        No POST parameters or frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_cleanse_setting_basic(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(reporter_filter.cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(
reporter_filter.cleanse_setting('PASSWORD', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_ignore_case(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.cleanse_setting('password', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_recurses_in_dictionary(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {'login': 'cooper', 'password': 'secret'}
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
{'login': 'cooper', 'password': reporter_filter.cleansed_substitute},
)
def test_cleanse_setting_recurses_in_dictionary_with_non_string_key(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {('localhost', 8000): {'login': 'cooper', 'password': 'secret'}}
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
{
('localhost', 8000): {
'login': 'cooper',
'password': reporter_filter.cleansed_substitute,
},
},
)
def test_cleanse_setting_recurses_in_list_tuples(self):
reporter_filter = SafeExceptionReporterFilter()
initial = [
{
'login': 'cooper',
'password': 'secret',
'apps': (
{'name': 'app1', 'api_key': 'a06b-c462cffae87a'},
{'name': 'app2', 'api_key': 'a9f4-f152e97ad808'},
),
'tokens': ['98b37c57-ec62-4e39', '8690ef7d-8004-4916'],
},
{'SECRET_KEY': 'c4d77c62-6196-4f17-a06b-c462cffae87a'},
]
cleansed = [
{
'login': 'cooper',
'password': reporter_filter.cleansed_substitute,
'apps': (
{'name': 'app1', 'api_key': reporter_filter.cleansed_substitute},
{'name': 'app2', 'api_key': reporter_filter.cleansed_substitute},
),
'tokens': reporter_filter.cleansed_substitute,
},
{'SECRET_KEY': reporter_filter.cleansed_substitute},
]
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
cleansed,
)
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', tuple(initial)),
tuple(cleansed),
)
def test_request_meta_filtering(self):
request = self.rf.get('/', HTTP_SECRET_HEADER='super_secret')
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.get_safe_request_meta(request)['HTTP_SECRET_HEADER'],
reporter_filter.cleansed_substitute,
)
def test_exception_report_uses_meta_filtering(self):
response = self.client.get('/raises500/', HTTP_SECRET_HEADER='super_secret')
self.assertNotIn(b'super_secret', response.content)
response = self.client.get(
'/raises500/',
HTTP_SECRET_HEADER='super_secret',
HTTP_ACCEPT='application/json',
)
self.assertNotIn(b'super_secret', response.content)
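# A filter subclass overriding both the replacement text and the hidden-settings
# pattern; compared with Django's default pattern, the notable addition here is
# DATABASE_URL, so URL-style credentials get cleansed as well.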
class CustomExceptionReporterFilter(SafeExceptionReporterFilter):
cleansed_substitute = 'XXXXXXXXXXXXXXXXXXXX'
hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL', flags=re.I)
@override_settings(
ROOT_URLCONF='view_tests.urls',
DEFAULT_EXCEPTION_REPORTER_FILTER='%s.CustomExceptionReporterFilter' % __name__,
)
class CustomExceptionReporterFilterTests(SimpleTestCase):
def setUp(self):
get_default_exception_reporter_filter.cache_clear()
def tearDown(self):
get_default_exception_reporter_filter.cache_clear()
def test_setting_allows_custom_subclass(self):
self.assertIsInstance(
get_default_exception_reporter_filter(),
CustomExceptionReporterFilter,
)
def test_cleansed_substitute_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting('password', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_hidden_settings_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting('database_url', 'super_secret'),
reporter_filter.cleansed_substitute,
)
class NonHTMLResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
The plain text 500 debug-only error page is served when it has been
    detected that the request doesn't accept HTML content. Don't check for
(non)existence of frames vars in the traceback information section of the
response content because they're not included in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_ACCEPT='application/json')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_non_html_response_encoding(self):
response = self.client.get('/raises500/', HTTP_ACCEPT='application/json')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class DecoratorsTests(SimpleTestCase):
def test_sensitive_variables_not_called(self):
msg = (
'sensitive_variables() must be called to use it as a decorator, '
'e.g., use @sensitive_variables(), not @sensitive_variables.'
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_variables
def test_func(password):
pass
def test_sensitive_post_parameters_not_called(self):
msg = (
'sensitive_post_parameters() must be called to use it as a '
'decorator, e.g., use @sensitive_post_parameters(), not '
'@sensitive_post_parameters.'
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_post_parameters
def test_func(request):
return index_page(request)
|
GUI_DD.py | import tkinter as tk
from threading import Thread
import pyttsx3
import numpy
from PIL import ImageTk, Image
from scipy.spatial import distance
import PIL
import cv2
import dlib
from imutils import face_utils
import face_recognition
import os
import csv
import GUI_BAR
import GUI_USERS
from datetime import datetime
import GUI_line
encode_list=[]
name=[]
mylist=os.listdir("re/known/")
def Open_bar():
GUI_BAR.bar_chart()
file_name=datetime.now().day.__str__()+"-"+datetime.now().month.__str__()+"-"+datetime.now().year.__str__()+".csv"
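# One CSV per day, named "<day>-<month>-<year>.csv"; each file gets a header
# row the first time the program runs on that day.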
if os.path.exists("re/user_files/"+file_name):
pass
else:
with open("re/user_files/"+file_name,'w',newline="") as file:
writer=csv.writer(file)
writer.writerow(["name","hours"])
#file_name=datetime.now().day.__str__()+"-"+datetime.now().month.__str__()+"-"+datetime.now().year.__str__()+".csv"
if os.path.exists("re/drowsiness_files/"+file_name):
pass
else:
with open("re/drowsiness_files/"+file_name,'w',newline="") as file:
writer=csv.writer(file)
writer.writerow(["name","time","EAR"])
for i in mylist:
img=face_recognition.load_image_file("re/known/"+i)
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
encode=face_recognition.face_encodings(img)[0]
encode_list.append(encode)
name.append(os.path.splitext(i)[0])
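# Shared mutable state for the capture loop: the EAR threshold and consecutive
# frame count that trigger the drowsiness alarm, plus session timing and the
# recognised user's name.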
class data_t:
EAR_tr = 0.3
EAR_FRAME = 15
COUNTER = 0
FLAG_S=False
FRAME_First=True
name=""
FLAG_START=False
stime = None
etime = None
pt = None
frame_c=0
d=data_t
#Load detector and predictor
det=cv2.CascadeClassifier("haarcascades/haarcascade_frontalface_default.xml")
pre=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
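# The predictor is dlib's standard 68-point facial landmark model; imutils'
# FACIAL_LANDMARKS_IDXS (used in find_eye below) picks the left- and right-eye
# landmarks out of that 68-point layout.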
def Start_frame():
d.FLAG_START=True
show_frame()
start_b["state"] = "disabled"
view_user_b["state"] = "disabled"
report_b["state"] = "disabled"
report_b1["state"] = "disabled"
end_b["state"] = "active"
def Stop_frame():
d.FLAG_START = False
start_b["state"]="active"
view_user_b["state"]="active"
report_b["state"] = "active"
report_b1["state"] = "active"
end_b["state"] = "disabled"
d.FLAG_START=False
d.etime=datetime.now()
d.pt=d.etime - d.stime
print(d.stime)
print(d.etime)
print(d.pt)
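    # Round-trip the timedelta through its string form so strptime can split it
    # into hours and minutes; note this breaks for sessions of 24 hours or more.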
d.pt=str(d.pt)
d.pt=datetime.strptime(d.pt,"%H:%M:%S.%f")
with open("re/user_files/"+file_name, 'a') as file:
writer=csv.writer(file)
        tot=d.pt.hour + d.pt.minute / 60  # session length in hours
writer.writerow([d.name,tot])
width, height = 800, 600
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
root=tk.Toplevel()
background = "dbms.png"
img=Image.open(background)
img=img.resize((800, 600), Image.LANCZOS)  # LANCZOS replaces ANTIALIAS, removed in Pillow 10
img = ImageTk.PhotoImage(img)
panel = tk.Label(root, image=img)
panel.image = img
panel.pack()
selection=tk.Frame(root)
selection.pack()
#panel.grid(row=1,column=1)
start_b=tk.Button(selection,text="Start",command=Start_frame)
start_b.grid(row=1,column=1)
#start_b.pack()
end_b=tk.Button(selection,text="Stop",command=Stop_frame)
end_b["state"]="disabled"
end_b.grid(row=1,column=2)
view_user_b=tk.Button(selection,text="View Users",command=GUI_USERS.Users)
view_user_b.grid(row=1,column=3)
report_b=tk.Button(selection,text="Bar Chart",command=Open_bar)
report_b.grid(row=1,column=4)
report_b1=tk.Button(selection,text="Line Chart",command=GUI_line.bar_chart)
report_b1.grid(row=1,column=5)
#end_b.pack()
def Speak():
engine = pyttsx3.init()
engine.setProperty('volume', 1.0)
engine.say("Wake Up"+d.name)
engine.runAndWait()
d.FLAG_S=False
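# Eye Aspect Ratio (EAR): the ratio of the two vertical eye-landmark distances
# to the horizontal one. It stays roughly constant while the eye is open and
# drops toward zero as the eye closes, which is what the threshold detects.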
def eye_aspect_ratio(eye):
A=distance.euclidean(eye[1],eye[5])
B=distance.euclidean(eye[2],eye[4])
C=distance.euclidean(eye[0],eye[3])
ear=(A+B)/(2.0*C)
return ear
def find_eye(shape):
(lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
lefteye=shape[lstart:lend]
righteye=shape[rstart:rend]
leftEAR=eye_aspect_ratio(lefteye)
right_EAR=eye_aspect_ratio(righteye)
ear=(leftEAR+right_EAR)/2
return ear,leftEAR,right_EAR
def show_frame():
_, frame = cap.read()
#frame = cv2.flip(frame, 1)
#Code for Drowsiness
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
rects=det.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,flags=cv2.CASCADE_SCALE_IMAGE)
if d.FRAME_First:
loct = face_recognition.face_locations(frame)
encodet = face_recognition.face_encodings(frame)
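        # Compare each detected face against the known encodings; 0.5 is a
        # stricter tolerance than face_recognition's default of 0.6, trading
        # some recall for fewer false matches.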
for face, loca in zip(encodet, loct):
match = face_recognition.compare_faces(encode_list, face, 0.5)
dist = face_recognition.face_distance(encode_list, face)
matin = numpy.argmin(dist)
if match[matin]:
frame = cv2.rectangle(frame, (loca[3], loca[0]), (loca[1], loca[2]), (0, 255, 0), 2)
frame = cv2.putText(frame, name[matin], (loca[3], loca[2] + 48), cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 0),4)
d.name=name[matin]
else:
h = loca[2] - loca[0]
w = loca[1] - loca[3]
crop_img = frame[loca[0]:loca[0] + h, loca[2]:loca[2] + w]
cv2.imwrite("re/unknown/unknown.png", crop_img)
d.name=""
frame = cv2.rectangle(frame, (loca[3], loca[0]), (loca[1], loca[2]), (0, 255, 0), 2)
frame = cv2.putText(frame, "Unknown", (loca[3], loca[2] + 48), cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 0), 4)
print(match)
print(dist)
d.stime=datetime.now()
d.FRAME_First=False
#,minSize=(30,30)
for (x,y,w,h) in rects:
rect=dlib.rectangle(int(x),int(y),int(x+w),int(y+h))
shape=pre(gray,rect)
shape=face_utils.shape_to_np(shape)
eye=find_eye(shape)
ear=eye[0]
leftEAR=eye[1]
rightEAR=eye[2]
if d.frame_c > 100:
with open("re/drowsiness_files/" + file_name, 'a', newline="") as file:
writer = csv.writer(file)
date=str(datetime.now().time().hour) + ":" + str(datetime.now().time().minute) + ":" + str(datetime.now().time().second)
writer.writerow([d.name, date, ear])
d.frame_c = 0
d.frame_c=d.frame_c+1
if ear<d.EAR_tr:
print(d.COUNTER)
d.COUNTER = d.COUNTER + 1
if d.COUNTER>d.EAR_FRAME:
print("You are sleeping")
with open("re/drowsiness_files/"+file_name,'a',newline="") as file:
writer=csv.writer(file)
date=str(datetime.now().time().hour) + ":" + str(datetime.now().time().minute) + ":" + str(datetime.now().time().second)
writer.writerow([d.name,date,ear])
if not d.FLAG_S:
d.FLAG_S=True
                    t=Thread(target=Speak)  # pass the function itself; Speak() would run here and block the loop
t.daemon=True
t.start()
else:
d.COUNTER = 0
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = PIL.Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
panel.imgtk = imgtk
panel.configure(image=imgtk)
if d.FLAG_START:
panel.after(10, show_frame)
if d.FLAG_START:
show_frame()
root.mainloop()
|
recipe-576684.py | #!/usr/bin/env python
def run_async(func):
"""
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
        func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
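# Note: the returned thread is not a daemon, and exceptions raised inside func
# do not propagate to the caller -- they only surface as a printed traceback.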
if __name__ == '__main__':
from time import sleep
@run_async
def print_somedata():
print('starting print_somedata')
sleep(2)
print('print_somedata: 2 sec passed')
sleep(2)
print('print_somedata: 2 sec passed')
sleep(2)
print('finished print_somedata')
def main():
print_somedata()
print('back in main')
print_somedata()
print('back in main')
print_somedata()
print('back in main')
main()
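    # The three worker threads are non-daemon, so the interpreter waits for all
    # of them to finish before the process exits, even though main() returns
    # right after the 'back in main' prints.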
|