integration_test_support.py
# flask-hello-world
# Copyright 2012-2013 Michael Gruber, Alexander Metzner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides an IntegrationTestServerFixture which runs an
IntegrationTestServer on "http://127.0.0.1:5000/".
"""
from multiprocessing import Process
from pyfix import Fixture
from time import sleep
from urllib import urlopen
from helloworld.webapp import application
class IntegrationTestServer(object):
def __init__(self):
self._process = Process(target=application.run)
self._process.start()
sleep(0.2)
def stop(self):
self._process.terminate()
def get_page(self, url):
return urlopen('http://127.0.0.1:5000' + url).read()
class IntegrationTestServerFixture(Fixture):
def reclaim(self, integration_test_server):
integration_test_server.stop()
def provide(self):
return [IntegrationTestServer()]
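# Minimal usage sketch (not part of the original module): it exercises the
# fixture's server class directly, assuming the helloworld Flask app serves "/".
if __name__ == "__main__":
    server = IntegrationTestServer()
    try:
        print(server.get_page("/"))
    finally:
        server.stop()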
braille.py
# -*- coding: UTF-8 -*-
#braille.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2008-2018 NV Access Limited, Joseph Lee, Babbage B.V., Davy Kager, Bram Duvigneau
import sys
import itertools
import os
import pkgutil
import ctypes.wintypes
import threading
import time
import wx
import louisHelper
import louis
import gui
import winKernel
import keyboardHandler
import baseObject
import config
from logHandler import log
import controlTypes
import api
import textInfos
import brailleDisplayDrivers
import inputCore
import brailleTables
import re
import scriptHandler
import collections
import extensionPoints
import hwPortUtils
import bdDetect
import winUser
roleLabels = {
# Translators: Displayed in braille for an object which is a
# window.
controlTypes.ROLE_WINDOW: _("wnd"),
# Translators: Displayed in braille for an object which is a
# dialog.
controlTypes.ROLE_DIALOG: _("dlg"),
# Translators: Displayed in braille for an object which is a
# check box.
controlTypes.ROLE_CHECKBOX: _("chk"),
# Translators: Displayed in braille for an object which is a
# radio button.
controlTypes.ROLE_RADIOBUTTON: _("rbtn"),
# Translators: Displayed in braille for an object which is an
# editable text field.
controlTypes.ROLE_EDITABLETEXT: _("edt"),
# Translators: Displayed in braille for an object which is a
# button.
controlTypes.ROLE_BUTTON: _("btn"),
# Translators: Displayed in braille for an object which is a
# menu bar.
controlTypes.ROLE_MENUBAR: _("mnubar"),
# Translators: Displayed in braille for an object which is a
# menu item.
controlTypes.ROLE_MENUITEM: _("mnuitem"),
# Translators: Displayed in braille for an object which is a
# menu.
controlTypes.ROLE_POPUPMENU: _("mnu"),
# Translators: Displayed in braille for an object which is a
# combo box.
controlTypes.ROLE_COMBOBOX: _("cbo"),
# Translators: Displayed in braille for an object which is a
# list.
controlTypes.ROLE_LIST: _("lst"),
# Translators: Displayed in braille for an object which is a
# graphic.
controlTypes.ROLE_GRAPHIC: _("gra"),
# Translators: Displayed in braille for an object which is a
# help balloon.
controlTypes.ROLE_HELPBALLOON: _("hlp"),
# Translators: Displayed in braille for an object which is a
# tool tip.
controlTypes.ROLE_TOOLTIP: _("tltip"),
# Translators: Displayed in braille for an object which is a
# link.
controlTypes.ROLE_LINK: _("lnk"),
# Translators: Displayed in braille for an object which is a
# tree view.
controlTypes.ROLE_TREEVIEW: _("tv"),
# Translators: Displayed in braille for an object which is a
# tree view item.
controlTypes.ROLE_TREEVIEWITEM: _("tvitem"),
# Translators: Displayed in braille for an object which is a
# tab control.
controlTypes.ROLE_TABCONTROL: _("tabctl"),
# Translators: Displayed in braille for an object which is a
# progress bar.
controlTypes.ROLE_PROGRESSBAR: _("prgbar"),
# Translators: Displayed in braille for an object which is a
# scroll bar.
controlTypes.ROLE_SCROLLBAR: _("scrlbar"),
# Translators: Displayed in braille for an object which is a
# status bar.
controlTypes.ROLE_STATUSBAR: _("stbar"),
# Translators: Displayed in braille for an object which is a
# table.
controlTypes.ROLE_TABLE: _("tbl"),
# Translators: Displayed in braille for an object which is a
# tool bar.
controlTypes.ROLE_TOOLBAR: _("tlbar"),
# Translators: Displayed in braille for an object which is a
# drop down button.
controlTypes.ROLE_DROPDOWNBUTTON: _("drbtn"),
# Displayed in braille for an object which is a
# separator.
controlTypes.ROLE_SEPARATOR: u"⠤⠤⠤⠤⠤",
# Translators: Displayed in braille for an object which is a
# block quote.
controlTypes.ROLE_BLOCKQUOTE: _("bqt"),
# Translators: Displayed in braille for an object which is a
# document.
controlTypes.ROLE_DOCUMENT: _("doc"),
# Translators: Displayed in braille for an object which is an
# application.
controlTypes.ROLE_APPLICATION: _("app"),
# Translators: Displayed in braille for an object which is a
# grouping.
controlTypes.ROLE_GROUPING: _("grp"),
# Translators: Displayed in braille for an object which is an
# embedded object.
controlTypes.ROLE_EMBEDDEDOBJECT: _("embedded"),
# Translators: Displayed in braille for an object which is an
# end note.
controlTypes.ROLE_ENDNOTE: _("enote"),
# Translators: Displayed in braille for an object which is a
# foot note.
controlTypes.ROLE_FOOTNOTE: _("fnote"),
# Translators: Displayed in braille for an object which is a
# terminal.
controlTypes.ROLE_TERMINAL: _("term"),
# Translators: Displayed in braille for an object which is a
# section.
controlTypes.ROLE_SECTION: _("sect"),
# Translators: Displayed in braille for an object which is a
# toggle button.
controlTypes.ROLE_TOGGLEBUTTON: _("tgbtn"),
# Translators: Displayed in braille for an object which is a
# split button.
controlTypes.ROLE_SPLITBUTTON: _("splbtn"),
# Translators: Displayed in braille for an object which is a
# menu button.
controlTypes.ROLE_MENUBUTTON: _("mnubtn"),
# Translators: Displayed in braille for an object which is a
# spin button.
controlTypes.ROLE_SPINBUTTON: _("spnbtn"),
# Translators: Displayed in braille for an object which is a
# tree view button.
controlTypes.ROLE_TREEVIEWBUTTON: _("tvbtn"),
# Translators: Displayed in braille for an object which is a
# menu.
controlTypes.ROLE_MENU: _("mnu"),
# Translators: Displayed in braille for an object which is a
# panel.
controlTypes.ROLE_PANEL: _("pnl"),
# Translators: Displayed in braille for an object which is a
# password edit.
controlTypes.ROLE_PASSWORDEDIT: _("pwdedt"),
# Translators: Displayed in braille for an object which is deleted.
controlTypes.ROLE_DELETED_CONTENT: _("del"),
# Translators: Displayed in braille for an object which is inserted.
controlTypes.ROLE_INSERTED_CONTENT: _("ins"),
}
positiveStateLabels = {
# Translators: Displayed in braille when an object is selected.
controlTypes.STATE_SELECTED: _("sel"),
# Displayed in braille when an object (e.g. a toggle button) is pressed.
controlTypes.STATE_PRESSED: u"⢎⣿⡱",
# Displayed in braille when an object (e.g. a check box) is checked.
controlTypes.STATE_CHECKED: u"⣏⣿⣹",
# Displayed in braille when an object (e.g. a check box) is half checked.
controlTypes.STATE_HALFCHECKED: u"⣏⣸⣹",
# Translators: Displayed in braille when an object (e.g. an editable text field) is read-only.
controlTypes.STATE_READONLY: _("ro"),
# Translators: Displayed in braille when an object (e.g. a tree view item) is expanded.
controlTypes.STATE_EXPANDED: _("-"),
# Translators: Displayed in braille when an object (e.g. a tree view item) is collapsed.
controlTypes.STATE_COLLAPSED: _("+"),
# Translators: Displayed in braille when an object has a popup (usually a sub-menu).
controlTypes.STATE_HASPOPUP: _("submnu"),
# Translators: Displayed in braille when a protected control or a document is encountered.
controlTypes.STATE_PROTECTED: _("***"),
# Translators: Displayed in braille when a required form field is encountered.
controlTypes.STATE_REQUIRED: _("req"),
# Translators: Displayed in braille when an invalid entry has been made.
controlTypes.STATE_INVALID_ENTRY: _("invalid"),
# Translators: Displayed in braille when an object supports autocompletion.
controlTypes.STATE_AUTOCOMPLETE: _("..."),
# Translators: Displayed in braille when an edit field allows typing multiple lines of text such as comment fields on websites.
controlTypes.STATE_MULTILINE: _("mln"),
# Translators: Displayed in braille when an object is clickable.
controlTypes.STATE_CLICKABLE: _("clk"),
# Translators: Displayed in braille when an object is sorted ascending.
controlTypes.STATE_SORTED_ASCENDING: _("sorted asc"),
# Translators: Displayed in braille when an object is sorted descending.
controlTypes.STATE_SORTED_DESCENDING: _("sorted desc"),
# Translators: Displayed in braille when an object (usually a graphic) has a long description.
controlTypes.STATE_HASLONGDESC: _("ldesc"),
# Translators: Displayed in braille when there is a formula on a spreadsheet cell.
controlTypes.STATE_HASFORMULA: _("frml"),
# Translators: Displayed in braille when there is a comment for a spreadsheet cell or piece of text in a document.
controlTypes.STATE_HASCOMMENT: _("cmnt"),
}
negativeStateLabels = {
# Translators: Displayed in braille when an object is not selected.
controlTypes.STATE_SELECTED: _("nsel"),
# Displayed in braille when an object (e.g. a toggle button) is not pressed.
controlTypes.STATE_PRESSED: u"⢎⣀⡱",
# Displayed in braille when an object (e.g. a check box) is not checked.
controlTypes.STATE_CHECKED: u"⣏⣀⣹",
}
landmarkLabels = {
# Translators: Displayed in braille for the banner landmark, normally found on web pages.
"banner": pgettext("braille landmark abbreviation", "bnnr"),
# Translators: Displayed in braille for the complementary landmark, normally found on web pages.
"complementary": pgettext("braille landmark abbreviation", "cmpl"),
# Translators: Displayed in braille for the contentinfo landmark, normally found on web pages.
"contentinfo": pgettext("braille landmark abbreviation", "cinf"),
# Translators: Displayed in braille for the main landmark, normally found on web pages.
"main": pgettext("braille landmark abbreviation", "main"),
# Translators: Displayed in braille for the navigation landmark, normally found on web pages.
"navigation": pgettext("braille landmark abbreviation", "navi"),
# Translators: Displayed in braille for the search landmark, normally found on web pages.
"search": pgettext("braille landmark abbreviation", "srch"),
# Translators: Displayed in braille for the form landmark, normally found on web pages.
"form": pgettext("braille landmark abbreviation", "form"),
# Strictly speaking, region isn't a landmark, but it is very similar.
# Translators: Displayed in braille for a significant region, normally found on web pages.
"region": pgettext("braille landmark abbreviation", "rgn"),
}
#: Cursor shapes
CURSOR_SHAPES = (
# Translators: The description of a braille cursor shape.
(0xC0, _("Dots 7 and 8")),
# Translators: The description of a braille cursor shape.
(0x80, _("Dot 8")),
# Translators: The description of a braille cursor shape.
(0xFF, _("All dots")),
)
SELECTION_SHAPE = 0xC0 #: Dots 7 and 8
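# Illustrative note (not from the original source): in these cell values,
# dot n corresponds to bit n - 1, so dots 7 and 8 are (1 << 6) | (1 << 7) == 0xC0
# and all eight dots are 0xFF.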
#: Unicode braille indicator at the start of untranslated braille input.
INPUT_START_IND = u"⣏"
#: Unicode braille indicator at the end of untranslated braille input.
INPUT_END_IND = u" ⣹"
# used to separate chunks of text when programmatically joined
TEXT_SEPARATOR = " "
#: Identifier for a focus context presentation setting that
#: only shows as much as possible focus context information when the context has changed.
CONTEXTPRES_CHANGEDCONTEXT = "changedContext"
#: Identifier for a focus context presentation setting that
#: shows as much as possible focus context information if the focus object doesn't fill up the whole display.
CONTEXTPRES_FILL = "fill"
#: Identifier for a focus context presentation setting that
#: always shows the object with focus at the very left of the braille display.
CONTEXTPRES_SCROLL = "scroll"
#: Focus context presentations associated with their user readable and translatable labels
focusContextPresentations=[
# Translators: The label for a braille focus context presentation setting that
# only shows as much as possible focus context information when the context has changed.
(CONTEXTPRES_CHANGEDCONTEXT, _("Fill display for context changes")),
# Translators: The label for a braille focus context presentation setting that
# shows as much as possible focus context information if the focus object doesn't fill up the whole display.
# This was the pre NVDA 2017.3 default.
(CONTEXTPRES_FILL, _("Always fill display")),
# Translators: The label for a braille focus context presentation setting that
# always shows the object with focus at the very left of the braille display
# (i.e. you will have to scroll back for focus context information).
(CONTEXTPRES_SCROLL, _("Only when scrolling back")),
]
#: Named tuple for a region with start and end positions in a buffer
RegionWithPositions = collections.namedtuple("RegionWithPositions",("region","start","end"))
#: Automatic constant to be used by braille displays that support the "automatic" port
#: and automatic braille display detection
#: @type: tuple
# Translators: String representing automatic port selection for braille displays.
AUTOMATIC_PORT = ("auto", _("Automatic"))
#: Used in place of a specific braille display driver name to indicate that
#: braille displays should be automatically detected and used.
#: @type: str
AUTO_DISPLAY_NAME = AUTOMATIC_PORT[0]
#: A port name which indicates that USB should be used.
#: @type: tuple
# Translators: String representing the USB port selection for braille displays.
USB_PORT = ("usb", _("USB"))
#: A port name which indicates that Bluetooth should be used.
#: @type: tuple
# Translators: String representing the Bluetooth port selection for braille displays.
BLUETOOTH_PORT = ("bluetooth", _("Bluetooth"))
def NVDAObjectHasUsefulText(obj):
import displayModel
if issubclass(obj.TextInfo,displayModel.DisplayModelTextInfo):
# #1711: Flat review (using displayModel) should always be presented on the braille display
return True
else:
# Let the NVDAObject choose if the text should be presented
return obj._hasNavigableText
def _getDisplayDriver(moduleName, caseSensitive=True):
try:
return __import__("brailleDisplayDrivers.%s" % moduleName, globals(), locals(), ("brailleDisplayDrivers",)).BrailleDisplayDriver
except ImportError as initialException:
if caseSensitive:
raise initialException
for loader, name, isPkg in pkgutil.iter_modules(brailleDisplayDrivers.__path__):
if name.startswith('_') or name.lower() != moduleName.lower():
continue
return __import__("brailleDisplayDrivers.%s" % name, globals(), locals(), ("brailleDisplayDrivers",)).BrailleDisplayDriver
else:
raise initialException
def getDisplayList(excludeNegativeChecks=True):
"""Gets a list of available display driver names with their descriptions.
@param excludeNegativeChecks: excludes all drivers for which the check method returns C{False}.
@type excludeNegativeChecks: bool
@return: list of tuples with driver names and descriptions.
@rtype: [(str,unicode)]
"""
displayList = []
# The display that should be placed at the end of the list.
lastDisplay = None
for loader, name, isPkg in pkgutil.iter_modules(brailleDisplayDrivers.__path__):
if name.startswith('_'):
continue
try:
display = _getDisplayDriver(name)
except:
log.error("Error while importing braille display driver %s" % name,
exc_info=True)
continue
try:
if not excludeNegativeChecks or display.check():
if display.name == "noBraille":
lastDisplay = (display.name, display.description)
else:
displayList.append((display.name, display.description))
else:
log.debugWarning("Braille display driver %s reports as unavailable, excluding" % name)
except:
log.error("", exc_info=True)
displayList.sort(key=lambda d : d[1].lower())
if lastDisplay:
displayList.append(lastDisplay)
return displayList
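# Illustrative result shape (the driver name "exampleDriver" and its description
# are placeholders, not taken from this source):
#   [("exampleDriver", "Example braille display"), ("noBraille", ...)]
# The "noBraille" entry, when present, is always moved to the end of the list.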
class Region(object):
"""A region of braille to be displayed.
Each portion of braille to be displayed is represented by a region.
The region is responsible for retrieving its text and the cursor and selection positions, translating it into braille cells and handling cursor routing requests relative to its braille cells.
The L{BrailleBuffer} containing this region will call L{update} and expect that L{brailleCells}, L{brailleCursorPos}, L{brailleSelectionStart} and L{brailleSelectionEnd} will be set appropriately.
L{routeTo} will be called to handle a cursor routing request.
"""
def __init__(self):
#: The original, raw text of this region.
self.rawText = ""
#: The position of the cursor in L{rawText}, C{None} if the cursor is not in this region.
#: @type: int
self.cursorPos = None
#: The start of the selection in L{rawText} (inclusive), C{None} if there is no selection in this region.
#: @type: int
self.selectionStart = None
#: The end of the selection in L{rawText} (exclusive), C{None} if there is no selection in this region.
#: @type: int
self.selectionEnd = None
#: The translated braille representation of this region.
#: @type: [int, ...]
self.brailleCells = []
#: liblouis typeform flags for each character in L{rawText},
#: C{None} if no typeform info.
#: @type: [int, ...]
self.rawTextTypeforms = None
#: A list mapping positions in L{rawText} to positions in L{brailleCells}.
#: @type: [int, ...]
self.rawToBraillePos = []
#: A list mapping positions in L{brailleCells} to positions in L{rawText}.
#: @type: [int, ...]
self.brailleToRawPos = []
#: The position of the cursor in L{brailleCells}, C{None} if the cursor is not in this region.
#: @type: int
self.brailleCursorPos = None
#: The position of the selection start in L{brailleCells}, C{None} if there is no selection in this region.
#: @type: int
self.brailleSelectionStart = None
#: The position of the selection end in L{brailleCells}, C{None} if there is no selection in this region.
#: @type: int
self.brailleSelectionEnd = None
#: Whether to hide all previous regions.
#: @type: bool
self.hidePreviousRegions = False
#: Whether this region should be positioned at the absolute left of the display when focused.
#: @type: bool
self.focusToHardLeft = False
def update(self):
"""Update this region.
Subclasses should extend this to update L{rawText}, L{cursorPos}, L{selectionStart} and L{selectionEnd} if necessary.
The base class method handles translation of L{rawText} into braille, placing the result in L{brailleCells}.
Typeform information from L{rawTextTypeforms} is used, if any.
L{rawToBraillePos} and L{brailleToRawPos} are updated according to the translation.
L{brailleCursorPos}, L{brailleSelectionStart} and L{brailleSelectionEnd} are similarly updated based on L{cursorPos}, L{selectionStart} and L{selectionEnd}, respectively.
@postcondition: L{brailleCells}, L{brailleCursorPos}, L{brailleSelectionStart} and L{brailleSelectionEnd} are updated and ready for rendering.
"""
mode = louis.dotsIO
if config.conf["braille"]["expandAtCursor"] and self.cursorPos is not None:
mode |= louis.compbrlAtCursor
self.brailleCells, self.brailleToRawPos, self.rawToBraillePos, self.brailleCursorPos = louisHelper.translate(
[os.path.join(brailleTables.TABLES_DIR, config.conf["braille"]["translationTable"]),
"braille-patterns.cti"],
self.rawText,
typeform=self.rawTextTypeforms,
mode=mode,
cursorPos=self.cursorPos
)
if self.selectionStart is not None and self.selectionEnd is not None:
try:
# Mark the selection.
self.brailleSelectionStart = self.rawToBraillePos[self.selectionStart]
if self.selectionEnd >= len(self.rawText):
self.brailleSelectionEnd = len(self.brailleCells)
else:
self.brailleSelectionEnd = self.rawToBraillePos[self.selectionEnd]
for pos in xrange(self.brailleSelectionStart, self.brailleSelectionEnd):
self.brailleCells[pos] |= SELECTION_SHAPE
except IndexError:
pass
def routeTo(self, braillePos):
"""Handle a cursor routing request.
For example, this might activate an object or move the cursor to the requested position.
@param braillePos: The routing position in L{brailleCells}.
@type braillePos: int
@note: If routing the cursor, L{brailleToRawPos} can be used to translate L{braillePos} into a position in L{rawText}.
"""
def nextLine(self):
"""Move to the next line if possible.
"""
def previousLine(self, start=False):
"""Move to the previous line if possible.
@param start: C{True} to move to the start of the line, C{False} to move to the end.
@type start: bool
"""
class TextRegion(Region):
"""A simple region containing a string of text.
"""
def __init__(self, text):
super(TextRegion, self).__init__()
self.rawText = text
def getBrailleTextForProperties(**propertyValues):
textList = []
name = propertyValues.get("name")
if name:
textList.append(name)
role = propertyValues.get("role")
roleText = propertyValues.get("roleText")
states = propertyValues.get("states")
positionInfo = propertyValues.get("positionInfo")
level = positionInfo.get("level") if positionInfo else None
cellCoordsText=propertyValues.get('cellCoordsText')
rowNumber = propertyValues.get("rowNumber")
columnNumber = propertyValues.get("columnNumber")
rowSpan = propertyValues.get("rowSpan")
columnSpan = propertyValues.get("columnSpan")
includeTableCellCoords = propertyValues.get("includeTableCellCoords", True)
if role is not None and not roleText:
if role == controlTypes.ROLE_HEADING and level:
# Translators: Displayed in braille for a heading with a level.
# %s is replaced with the level.
roleText = _("h%s") % level
level = None
elif role == controlTypes.ROLE_LINK and states and controlTypes.STATE_VISITED in states:
states = states.copy()
states.discard(controlTypes.STATE_VISITED)
# Translators: Displayed in braille for a link which has been visited.
roleText = _("vlnk")
elif (name or cellCoordsText or rowNumber or columnNumber) and role in controlTypes.silentRolesOnFocus:
roleText = None
else:
roleText = roleLabels.get(role, controlTypes.roleLabels[role])
elif role is None:
role = propertyValues.get("_role")
value = propertyValues.get("value")
if value and role not in controlTypes.silentValuesForRoles:
textList.append(value)
if states:
textList.extend(
controlTypes.processAndLabelStates(role, states, controlTypes.REASON_FOCUS, states, None, positiveStateLabels, negativeStateLabels)
)
if roleText:
textList.append(roleText)
description = propertyValues.get("description")
if description:
textList.append(description)
keyboardShortcut = propertyValues.get("keyboardShortcut")
if keyboardShortcut:
textList.append(keyboardShortcut)
if positionInfo:
indexInGroup = positionInfo.get("indexInGroup")
similarItemsInGroup = positionInfo.get("similarItemsInGroup")
if indexInGroup and similarItemsInGroup:
# Translators: Brailled to indicate the position of an item in a group of items (such as a list).
# {number} is replaced with the number of the item in the group.
# {total} is replaced with the total number of items in the group.
textList.append(_("{number} of {total}").format(number=indexInGroup, total=similarItemsInGroup))
if level is not None:
# Translators: Displayed in braille when an object (e.g. a tree view item) has a hierarchical level.
# %s is replaced with the level.
textList.append(_('lv %s')%positionInfo['level'])
if rowNumber:
if includeTableCellCoords and not cellCoordsText:
if rowSpan>1:
# Translators: Displayed in braille for the table cell row numbers when a cell spans multiple rows.
# {rowNumber} and {rowSpan} are replaced with the first and last of the spanned row numbers.
rowStr = _("r{rowNumber}-{rowSpan}").format(rowNumber=rowNumber,rowSpan=rowNumber+rowSpan-1)
else:
# Translators: Displayed in braille for a table cell row number.
# {rowNumber} is replaced with the row number.
rowStr = _("r{rowNumber}").format(rowNumber=rowNumber)
textList.append(rowStr)
if columnNumber:
columnHeaderText = propertyValues.get("columnHeaderText")
if columnHeaderText:
textList.append(columnHeaderText)
if includeTableCellCoords and not cellCoordsText:
if columnSpan>1:
# Translators: Displayed in braille for the table cell column numbers when a cell spans multiple columns.
# {columnNumber} and {columnSpan} are replaced with the first and last of the spanned column numbers.
columnStr = _("c{columnNumber}-{columnSpan}").format(columnNumber=columnNumber,columnSpan=columnNumber+columnSpan-1)
else:
# Translators: Displayed in braille for a table cell column number.
# {columnNumber} is replaced with the column number.
columnStr = _("c{columnNumber}").format(columnNumber=columnNumber)
textList.append(columnStr)
current = propertyValues.get('current', False)
if current:
try:
textList.append(controlTypes.isCurrentLabels[current])
except KeyError:
log.debugWarning("Aria-current value not handled: %s"%current)
textList.append(controlTypes.isCurrentLabels[True])
placeholder = propertyValues.get('placeholder', None)
if placeholder:
textList.append(placeholder)
if includeTableCellCoords and cellCoordsText:
textList.append(cellCoordsText)
return TEXT_SEPARATOR.join([x for x in textList if x])
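# Illustrative sketch (assumption, not from the original source): for a plain
# focused button, getBrailleTextForProperties(name="OK", role=controlTypes.ROLE_BUTTON)
# builds textList as ["OK", "btn"] (assuming ROLE_BUTTON is not in
# controlTypes.silentRolesOnFocus) and therefore returns "OK btn".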
class NVDAObjectRegion(Region):
"""A region to provide a braille representation of an NVDAObject.
This region will update based on the current state of the associated NVDAObject.
A cursor routing request will activate the object's default action.
"""
def __init__(self, obj, appendText=""):
"""Constructor.
@param obj: The associated NVDAObject.
@type obj: L{NVDAObjects.NVDAObject}
@param appendText: Text which should always be appended to the NVDAObject text, useful if this region will always precede other regions.
@type appendText: str
"""
super(NVDAObjectRegion, self).__init__()
self.obj = obj
self.appendText = appendText
def update(self):
obj = self.obj
presConfig = config.conf["presentation"]
role = obj.role
placeholderValue = obj.placeholder
if placeholderValue and not obj._isTextEmpty:
placeholderValue = None
text = getBrailleTextForProperties(
name=obj.name,
role=role,
roleText=obj.roleText,
current=obj.isCurrent,
placeholder=placeholderValue,
value=obj.value if not NVDAObjectHasUsefulText(obj) else None ,
states=obj.states,
description=obj.description if presConfig["reportObjectDescriptions"] else None,
keyboardShortcut=obj.keyboardShortcut if presConfig["reportKeyboardShortcuts"] else None,
positionInfo=obj.positionInfo if presConfig["reportObjectPositionInformation"] else None,
cellCoordsText=obj.cellCoordsText if config.conf["documentFormatting"]["reportTableCellCoords"] else None,
)
if role == controlTypes.ROLE_MATH:
import mathPres
mathPres.ensureInit()
if mathPres.brailleProvider:
try:
text += TEXT_SEPARATOR + mathPres.brailleProvider.getBrailleForMathMl(
obj.mathMl)
except (NotImplementedError, LookupError):
pass
self.rawText = text + self.appendText
super(NVDAObjectRegion, self).update()
def routeTo(self, braillePos):
try:
self.obj.doAction()
except NotImplementedError:
pass
def getControlFieldBraille(info, field, ancestors, reportStart, formatConfig):
presCat = field.getPresentationCategory(ancestors, formatConfig)
# Cache this for later use.
field._presCat = presCat
if reportStart:
# If this is a container, only report it if this is the start of the node.
if presCat == field.PRESCAT_CONTAINER and not field.get("_startOfNode"):
return None
else:
# We only report ends for containers
# and only if this is the end of the node.
if presCat != field.PRESCAT_CONTAINER or not field.get("_endOfNode"):
return None
role = field.get("role", controlTypes.ROLE_UNKNOWN)
states = field.get("states", set())
value=field.get('value',None)
current=field.get('current', None)
placeholder=field.get('placeholder', None)
roleText=field.get('roleText')
if presCat == field.PRESCAT_LAYOUT:
text = []
if current:
text.append(getBrailleTextForProperties(current=current))
return TEXT_SEPARATOR.join(text) if len(text) != 0 else None
elif role in (controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_TABLEROWHEADER) and field.get("table-id"):
# Table cell.
reportTableHeaders = formatConfig["reportTableHeaders"]
reportTableCellCoords = formatConfig["reportTableCellCoords"]
props = {
"states": states,
"rowNumber": field.get("table-rownumber"),
"columnNumber": field.get("table-columnnumber"),
"rowSpan": field.get("table-rowsspanned"),
"columnSpan": field.get("table-columnsspanned"),
"includeTableCellCoords": reportTableCellCoords,
"current": current,
}
if reportTableHeaders:
props["columnHeaderText"] = field.get("table-columnheadertext")
return getBrailleTextForProperties(**props)
elif reportStart:
props = {
# Don't report the role for math here.
# However, we still need to pass it (hence "_role").
"_role" if role == controlTypes.ROLE_MATH else "role": role,
"states": states,"value":value, "current":current, "placeholder":placeholder,"roleText":roleText}
if config.conf["presentation"]["reportKeyboardShortcuts"]:
kbShortcut = field.get("keyboardShortcut")
if kbShortcut:
props["keyboardShortcut"] = kbShortcut
level = field.get("level")
if level:
props["positionInfo"] = {"level": level}
text = getBrailleTextForProperties(**props)
content = field.get("content")
if content:
if text:
text += TEXT_SEPARATOR
text += content
elif role == controlTypes.ROLE_MATH:
import mathPres
mathPres.ensureInit()
if mathPres.brailleProvider:
try:
if text:
text += TEXT_SEPARATOR
text += mathPres.brailleProvider.getBrailleForMathMl(
info.getMathMl(field))
except (NotImplementedError, LookupError):
pass
return text
else:
# Translators: Displayed in braille at the end of a control field such as a list or table.
# %s is replaced with the control's role.
return (_("%s end") %
getBrailleTextForProperties(role=role,roleText=roleText))
def getFormatFieldBraille(field, fieldCache, isAtStart, formatConfig):
"""Generates the braille text for the given format field.
@param field: The format field to examine.
@type field: {str : str, ...}
@param fieldCache: The format field of the previous run; i.e. the cached format field.
@type fieldCache: {str : str, ...}
@param isAtStart: True if this format field precedes any text in the line/paragraph.
This is useful to restrict display of information which should only appear at the start of the line/paragraph;
e.g. the line number or line prefix (list bullet/number).
@type isAtStart: bool
@param formatConfig: The formatting config.
@type formatConfig: {str : bool, ...}
"""
textList = []
if isAtStart:
if formatConfig["reportLineNumber"]:
lineNumber = field.get("line-number")
if lineNumber:
textList.append("%s" % lineNumber)
linePrefix = field.get("line-prefix")
if linePrefix:
textList.append(linePrefix)
if formatConfig["reportHeadings"]:
headingLevel=field.get('heading-level')
if headingLevel:
# Translators: Displayed in braille for a heading with a level.
# %s is replaced with the level.
textList.append(_("h%s")%headingLevel)
if formatConfig["reportLinks"]:
link=field.get("link")
oldLink=fieldCache.get("link")
if link and link != oldLink:
textList.append(roleLabels[controlTypes.ROLE_LINK])
fieldCache.clear()
fieldCache.update(field)
return TEXT_SEPARATOR.join([x for x in textList if x])
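# Illustrative sketch (assumption, not from the original source): with
# isAtStart=True and formatConfig["reportHeadings"] enabled, a field containing
# {"heading-level": "2"} contributes "h2"; a "link" value that differs from the
# cached one contributes roleLabels[controlTypes.ROLE_LINK], i.e. "lnk".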
class TextInfoRegion(Region):
pendingCaretUpdate=False #: True if the cursor should be updated for this region on the display
allowPageTurns=True #: True if a page turn should be tried when a TextInfo cannot move anymore and the object supports page turns.
def __init__(self, obj):
super(TextInfoRegion, self).__init__()
self.obj = obj
def _isMultiline(self):
# A region's object can either be an NVDAObject or a tree interceptor.
# Tree interceptors should always be multiline.
from treeInterceptorHandler import TreeInterceptor
if isinstance(self.obj, TreeInterceptor):
return True
# Terminals and documents are inherently multiline, so they don't have the multiline state.
return (
self.obj.role in (controlTypes.ROLE_TERMINAL,controlTypes.ROLE_DOCUMENT)
or controlTypes.STATE_MULTILINE in self.obj.states
)
def _getSelection(self):
"""Retrieve the selection.
If there is no selection, retrieve the collapsed cursor.
@return: The selection.
@rtype: L{textInfos.TextInfo}
"""
try:
return self.obj.makeTextInfo(textInfos.POSITION_SELECTION)
except:
return self.obj.makeTextInfo(textInfos.POSITION_FIRST)
def _setCursor(self, info):
"""Set the cursor.
@param info: The range to which the cursor should be moved.
@type info: L{textInfos.TextInfo}
"""
try:
info.updateCaret()
except NotImplementedError:
log.debugWarning("", exc_info=True)
def _getTypeformFromFormatField(self, field, formatConfig):
typeform = louis.plain_text
if not formatConfig["reportFontAttributes"]:
return typeform
if field.get("bold", False):
typeform |= louis.bold
if field.get("italic", False):
typeform |= louis.italic
if field.get("underline", False):
typeform |= louis.underline
return typeform
def _addFieldText(self, text, contentPos, separate=True):
if separate and self.rawText:
# Separate this field text from the rest of the text.
text = TEXT_SEPARATOR + text
self.rawText += text
textLen = len(text)
self.rawTextTypeforms.extend((louis.plain_text,) * textLen)
self._rawToContentPos.extend((contentPos,) * textLen)
def _addTextWithFields(self, info, formatConfig, isSelection=False):
shouldMoveCursorToFirstContent = not isSelection and self.cursorPos is not None
ctrlFields = []
typeform = louis.plain_text
formatFieldAttributesCache = getattr(info.obj, "_brailleFormatFieldAttributesCache", {})
# When true, we are inside a clickable field, and should therefore not report any more new clickable fields
inClickable=False
for command in info.getTextWithFields(formatConfig=formatConfig):
if isinstance(command, basestring):
# Text should break a run of clickables
inClickable=False
self._isFormatFieldAtStart = False
if not command:
continue
if self._endsWithField:
# The last item added was a field,
# so add a space before the content.
self.rawText += TEXT_SEPARATOR
self.rawTextTypeforms.append(louis.plain_text)
self._rawToContentPos.append(self._currentContentPos)
if isSelection and self.selectionStart is None:
# This is where the content begins.
self.selectionStart = len(self.rawText)
elif shouldMoveCursorToFirstContent:
# This is the first piece of content after the cursor.
# Position the cursor here, as it may currently be positioned on control field text.
self.cursorPos = len(self.rawText)
shouldMoveCursorToFirstContent = False
self.rawText += command
commandLen = len(command)
self.rawTextTypeforms.extend((typeform,) * commandLen)
endPos = self._currentContentPos + commandLen
self._rawToContentPos.extend(xrange(self._currentContentPos, endPos))
self._currentContentPos = endPos
if isSelection:
# The last time this is set will be the end of the content.
self.selectionEnd = len(self.rawText)
self._endsWithField = False
elif isinstance(command, textInfos.FieldCommand):
cmd = command.command
field = command.field
if cmd == "formatChange":
typeform = self._getTypeformFromFormatField(field, formatConfig)
text = getFormatFieldBraille(field, formatFieldAttributesCache, self._isFormatFieldAtStart, formatConfig)
if not text:
continue
# Map this field text to the start of the field's content.
self._addFieldText(text, self._currentContentPos)
elif cmd == "controlStart":
if self._skipFieldsNotAtStartOfNode and not field.get("_startOfNode"):
text = None
else:
textList=[]
if not inClickable and formatConfig['reportClickable']:
states=field.get('states')
if states and controlTypes.STATE_CLICKABLE in states:
# We have entered an outermost clickable, or entered a new clickable after exiting a previous one.
# Report it if there is nothing else interesting about the field
field._presCat=presCat=field.getPresentationCategory(ctrlFields,formatConfig)
if not presCat or presCat is field.PRESCAT_LAYOUT:
textList.append(positiveStateLabels[controlTypes.STATE_CLICKABLE])
inClickable=True
text = info.getControlFieldBraille(field, ctrlFields, True, formatConfig)
if text:
textList.append(text)
text=" ".join(textList)
# Place this field on a stack so we can access it for controlEnd.
ctrlFields.append(field)
if not text:
continue
if getattr(field, "_presCat") == field.PRESCAT_MARKER:
# In this case, the field text is what the user cares about,
# not the actual content.
fieldStart = len(self.rawText)
if fieldStart > 0:
# There'll be a space before the field text.
fieldStart += 1
if isSelection and self.selectionStart is None:
self.selectionStart = fieldStart
elif shouldMoveCursorToFirstContent:
self.cursorPos = fieldStart
shouldMoveCursorToFirstContent = False
# Map this field text to the start of the field's content.
self._addFieldText(text, self._currentContentPos)
elif cmd == "controlEnd":
# Exiting a controlField should break a run of clickables
inClickable=False
field = ctrlFields.pop()
text = info.getControlFieldBraille(field, ctrlFields, False, formatConfig)
if not text:
continue
# Map this field text to the end of the field's content.
self._addFieldText(text, self._currentContentPos - 1)
self._endsWithField = True
if isSelection and self.selectionStart is None:
# There is no selection. This is a cursor.
self.cursorPos = len(self.rawText)
if not self._skipFieldsNotAtStartOfNode:
# We only render fields that aren't at the start of their nodes for the first part of the reading unit.
# Otherwise, we'll render fields that have already been rendered.
self._skipFieldsNotAtStartOfNode = True
info.obj._brailleFormatFieldAttributesCache = formatFieldAttributesCache
def _getReadingUnit(self):
return textInfos.UNIT_PARAGRAPH if config.conf["braille"]["readByParagraph"] else textInfos.UNIT_LINE
def update(self):
formatConfig = config.conf["documentFormatting"]
unit = self._getReadingUnit()
self.rawText = ""
self.rawTextTypeforms = []
self.cursorPos = None
# The output includes text representing fields which isn't part of the real content in the control.
# Therefore, maintain a map of positions in the output to positions in the content.
self._rawToContentPos = []
self._currentContentPos = 0
self.selectionStart = self.selectionEnd = None
self._isFormatFieldAtStart = True
self._skipFieldsNotAtStartOfNode = False
self._endsWithField = False
# Selection has priority over cursor.
# HACK: Some TextInfos only support UNIT_LINE properly if they are based on POSITION_CARET,
# and copying the TextInfo breaks this ability.
# So use the original TextInfo for line and a copy for cursor/selection.
self._readingInfo = readingInfo = self._getSelection()
sel = readingInfo.copy()
if not sel.isCollapsed:
# There is a selection.
if self.obj.isTextSelectionAnchoredAtStart:
# The end of the range is exclusive, so make it inclusive first.
readingInfo.move(textInfos.UNIT_CHARACTER, -1, "end")
# Collapse the selection to the unanchored end.
readingInfo.collapse(end=self.obj.isTextSelectionAnchoredAtStart)
# Get the reading unit at the selection.
readingInfo.expand(unit)
# Restrict the selection to the reading unit.
if sel.compareEndPoints(readingInfo, "startToStart") < 0:
sel.setEndPoint(readingInfo, "startToStart")
if sel.compareEndPoints(readingInfo, "endToEnd") > 0:
sel.setEndPoint(readingInfo, "endToEnd")
else:
# There is a cursor.
# Get the reading unit at the cursor.
readingInfo.expand(unit)
# Not all text APIs support offsets, so we can't always get the offset of the selection relative to the start of the reading unit.
# Therefore, grab the reading unit in three parts.
# First, the chunk from the start of the reading unit to the start of the selection.
chunk = readingInfo.copy()
chunk.collapse()
chunk.setEndPoint(sel, "endToStart")
self._addTextWithFields(chunk, formatConfig)
# If the user is entering braille, place any untranslated braille before the selection.
# Import late to avoid circular import.
import brailleInput
text = brailleInput.handler.untranslatedBraille
if text:
rawInputIndStart = len(self.rawText)
# _addFieldText adds text to self.rawText and updates other state accordingly.
self._addFieldText(INPUT_START_IND + text + INPUT_END_IND, None, separate=False)
rawInputIndEnd = len(self.rawText)
else:
rawInputIndStart = None
# Now, the selection itself.
self._addTextWithFields(sel, formatConfig, isSelection=True)
# Finally, get the chunk from the end of the selection to the end of the reading unit.
chunk.setEndPoint(readingInfo, "endToEnd")
chunk.setEndPoint(sel, "startToEnd")
self._addTextWithFields(chunk, formatConfig)
# Strip line ending characters.
self.rawText = self.rawText.rstrip("\r\n\0\v\f")
rawTextLen = len(self.rawText)
if rawTextLen < len(self._rawToContentPos):
# The stripped text is shorter than the original.
self._currentContentPos = self._rawToContentPos[rawTextLen]
del self.rawTextTypeforms[rawTextLen:]
# Trimming _rawToContentPos doesn't matter,
# because we'll only ever ask for indexes valid in rawText.
#del self._rawToContentPos[rawTextLen:]
if rawTextLen == 0 or not self._endsWithField:
# There is no text left after stripping line ending characters,
# or the last item added can be navigated with a cursor.
# Add a space in case the cursor is at the end of the reading unit.
self.rawText += TEXT_SEPARATOR
rawTextLen += 1
self.rawTextTypeforms.append(louis.plain_text)
self._rawToContentPos.append(self._currentContentPos)
if self.cursorPos is not None and self.cursorPos >= rawTextLen:
self.cursorPos = rawTextLen - 1
# The selection end doesn't have to be checked, Region.update() makes sure brailleSelectionEnd is valid.
# If this is not the start of the object, hide all previous regions.
start = readingInfo.obj.makeTextInfo(textInfos.POSITION_FIRST)
self.hidePreviousRegions = (start.compareEndPoints(readingInfo, "startToStart") < 0)
# Don't touch focusToHardLeft if it is already true
# For example, it can be set to True in getFocusContextRegions when this region represents the first new focus ancestor
# Alternatively, BrailleHandler._doNewObject can set this to True when this region represents the focus object and the focus ancestry didn't change
if not self.focusToHardLeft:
# If this is a multiline control, position it at the absolute left of the display when focused.
self.focusToHardLeft = self._isMultiline()
super(TextInfoRegion, self).update()
if rawInputIndStart is not None:
assert rawInputIndEnd is not None, "rawInputIndStart set but rawInputIndEnd isn't"
# These are the start and end of the untranslated input area,
# including the start and end indicators.
self._brailleInputIndStart = self.rawToBraillePos[rawInputIndStart]
self._brailleInputIndEnd = self.rawToBraillePos[rawInputIndEnd]
# These are the start and end of the actual untranslated input, excluding indicators.
self._brailleInputStart = self._brailleInputIndStart + len(INPUT_START_IND)
self._brailleInputEnd = self._brailleInputIndEnd - len(INPUT_END_IND)
self.brailleCursorPos = self._brailleInputStart + brailleInput.handler.untranslatedCursorPos
else:
self._brailleInputIndStart = None
def getTextInfoForBraillePos(self, braillePos):
pos = self._rawToContentPos[self.brailleToRawPos[braillePos]]
# pos is relative to the start of the reading unit.
# Therefore, get the start of the reading unit...
dest = self._readingInfo.copy()
dest.collapse()
# and move pos characters from there.
dest.move(textInfos.UNIT_CHARACTER, pos)
return dest
def routeTo(self, braillePos):
if self._brailleInputIndStart is not None and self._brailleInputIndStart <= braillePos < self._brailleInputIndEnd:
# The user is moving within untranslated braille input.
if braillePos < self._brailleInputStart:
# The user routed to the start indicator. Route to the start of the input.
braillePos = self._brailleInputStart
elif braillePos > self._brailleInputEnd:
# The user routed to the end indicator. Route to the end of the input.
braillePos = self._brailleInputEnd
# Import late to avoid circular import.
import brailleInput
brailleInput.handler.untranslatedCursorPos = braillePos - self._brailleInputStart
self.brailleCursorPos = self._brailleInputStart + brailleInput.handler.untranslatedCursorPos
brailleInput.handler.updateDisplay()
return
if braillePos == self.brailleCursorPos:
# The cursor is already at this position,
# so activate the position.
try:
self._getSelection().activate()
except NotImplementedError:
pass
return
dest = self.getTextInfoForBraillePos(braillePos)
self._setCursor(dest)
def nextLine(self):
dest = self._readingInfo.copy()
moved = dest.move(self._getReadingUnit(), 1)
if not moved:
if self.allowPageTurns and isinstance(dest.obj,textInfos.DocumentWithPageTurns):
try:
dest.obj.turnPage()
except RuntimeError:
pass
else:
dest=dest.obj.makeTextInfo(textInfos.POSITION_FIRST)
else: # no page turn support
return
dest.collapse()
self._setCursor(dest)
def previousLine(self, start=False):
dest = self._readingInfo.copy()
dest.collapse()
if start:
unit = self._getReadingUnit()
else:
# If the end of the reading unit is desired, move to the last character.
unit = textInfos.UNIT_CHARACTER
moved = dest.move(unit, -1)
if not moved:
if self.allowPageTurns and isinstance(dest.obj,textInfos.DocumentWithPageTurns):
try:
dest.obj.turnPage(previous=True)
except RuntimeError:
pass
else:
dest=dest.obj.makeTextInfo(textInfos.POSITION_LAST)
dest.expand(unit)
else: # no page turn support
return
dest.collapse()
self._setCursor(dest)
class CursorManagerRegion(TextInfoRegion):
def _isMultiline(self):
return True
def _getSelection(self):
return self.obj.selection
def _setCursor(self, info):
self.obj.selection = info
class ReviewTextInfoRegion(TextInfoRegion):
allowPageTurns=False
def _getSelection(self):
return api.getReviewPosition().copy()
def _setCursor(self, info):
api.setReviewPosition(info)
def rindex(seq, item, start, end):
for index in xrange(end - 1, start - 1, -1):
if seq[index] == item:
return index
raise ValueError("%r is not in sequence" % item)
class BrailleBuffer(baseObject.AutoPropertyObject):
def __init__(self, handler):
self.handler = handler
#: The regions in this buffer.
#: @type: [L{Region}, ...]
self.regions = []
#: The raw text of the entire buffer.
self.rawText = ""
#: The position of the cursor in L{brailleCells}, C{None} if no region contains the cursor.
#: @type: int
self.cursorPos = None
#: The translated braille representation of the entire buffer.
#: @type: [int, ...]
self.brailleCells = []
#: The position in L{brailleCells} where the display window starts (inclusive).
#: @type: int
self.windowStartPos = 0
def clear(self):
"""Clear the entire buffer.
This removes all regions and resets the window position to 0.
"""
self.regions = []
self.rawText = ""
self.cursorPos = None
self.brailleCursorPos = None
self.brailleCells = []
self.windowStartPos = 0
def _get_visibleRegions(self):
if not self.regions:
return
if self.regions[-1].hidePreviousRegions:
yield self.regions[-1]
return
for region in self.regions:
yield region
def _get_regionsWithPositions(self):
start = 0
for region in self.visibleRegions:
end = start + len(region.brailleCells)
yield RegionWithPositions(region, start, end)
start = end
_cache_rawToBraillePos=True
def _get_rawToBraillePos(self):
"""@return: a list mapping positions in L{rawText} to positions in L{brailleCells} for the entire buffer.
@rtype: [int, ...]
"""
rawToBraillePos = []
for region, regionStart, regionEnd in self.regionsWithPositions:
rawToBraillePos.extend(p+regionStart for p in region.rawToBraillePos)
return rawToBraillePos
_cache_brailleToRawPos=True
def _get_brailleToRawPos(self):
"""@return: a list mapping positions in L{brailleCells} to positions in L{rawText} for the entire buffer.
@rtype: [int, ...]
"""
brailleToRawPos = []
start = 0
for region in self.visibleRegions:
brailleToRawPos.extend(p+start for p in region.brailleToRawPos)
start+=len(region.rawText)
return brailleToRawPos
def bufferPosToRegionPos(self, bufferPos):
for region, start, end in self.regionsWithPositions:
if end > bufferPos:
return region, bufferPos - start
raise LookupError("No such position")
def regionPosToBufferPos(self, region, pos, allowNearest=False):
for testRegion, start, end in self.regionsWithPositions:
if region == testRegion:
if pos < end - start:
# The requested position is still valid within the region.
return start + pos
elif allowNearest:
# The position within the region isn't valid,
# but the region is valid, so return its start.
return start
break
if allowNearest:
# Resort to the start of the last region.
return start
raise LookupError("No such position")
def bufferPositionsToRawText(self, startPos, endPos):
return self.rawText[self.brailleToRawPos[startPos]:self.brailleToRawPos[endPos-1]+1]
def bufferPosToWindowPos(self, bufferPos):
if not (self.windowStartPos <= bufferPos < self.windowEndPos):
raise LookupError("Buffer position not in window")
return bufferPos - self.windowStartPos
def _get_windowEndPos(self):
endPos = self.windowStartPos + self.handler.displaySize
cellsLen = len(self.brailleCells)
if endPos >= cellsLen:
return cellsLen
if not config.conf["braille"]["wordWrap"]:
return endPos
try:
# Try not to split words across windows.
# To do this, break after the furthest possible space.
return min(rindex(self.brailleCells, 0, self.windowStartPos, endPos) + 1,
endPos)
except ValueError:
pass
return endPos
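# Worked example (illustrative, not from the original source): with
# windowStartPos == 0, displaySize == 10 and a blank cell (value 0) at index 7,
# rindex(...) returns 7, so the window ends at position 8 and the word starting
# at index 8 is pushed to the next window.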
def _set_windowEndPos(self, endPos):
"""Sets the end position for the braille window and recalculates the window start position based on several variables.
1. Braille display size.
2. Whether one of the regions should be shown hard left on the braille display;
i.e. because of the configuration setting for focus context representation,
or because the braille region that corresponds with the focus represents a multi-line edit box.
3. Whether word wrap is enabled."""
startPos = endPos - self.handler.displaySize
# Loop through the currently displayed regions in reverse order
# If focusToHardLeft is set for one of the regions, the display shouldn't scroll further back than the start of that region
for region, regionStart, regionEnd in reversed(list(self.regionsWithPositions)):
if regionStart<endPos:
if region.focusToHardLeft:
# Only scroll to the start of this region.
restrictPos = regionStart
break
elif config.conf["braille"]["focusContextPresentation"]!=CONTEXTPRES_CHANGEDCONTEXT:
# We aren't currently dealing with context change presentation,
# so we only need to consider the last region.
# Since it doesn't have focusToHardLeft set, the window start position isn't restricted.
restrictPos = 0
break
else:
restrictPos = 0
if startPos <= restrictPos:
self.windowStartPos = restrictPos
return
if not config.conf["braille"]["wordWrap"]:
self.windowStartPos = startPos
return
try:
# Try not to split words across windows.
# To do this, break after the furthest possible block of spaces.
# Find the start of the first block of spaces.
# Search from 1 cell before in case startPos is just after a space.
startPos = self.brailleCells.index(0, startPos - 1, endPos)
# Skip past spaces.
for startPos in xrange(startPos, endPos):
if self.brailleCells[startPos] != 0:
break
except ValueError:
pass
self.windowStartPos = startPos
def _nextWindow(self):
oldStart = self.windowStartPos
end = self.windowEndPos
if end < len(self.brailleCells):
self.windowStartPos = end
return self.windowStartPos != oldStart
def scrollForward(self):
if not self._nextWindow():
# The window could not be scrolled, so try moving to the next line.
if self.regions:
self.regions[-1].nextLine()
else:
# Scrolling succeeded.
self.updateDisplay()
def _previousWindow(self):
start = self.windowStartPos
if start > 0:
self.windowEndPos = start
return self.windowStartPos != start
def scrollBack(self):
if not self._previousWindow():
# The window could not be scrolled, so try moving to the previous line.
if self.regions:
self.regions[-1].previousLine()
else:
# Scrolling succeeded.
self.updateDisplay()
def scrollTo(self, region, pos):
pos = self.regionPosToBufferPos(region, pos)
while pos >= self.windowEndPos:
if not self._nextWindow():
break
while pos < self.windowStartPos:
if not self._previousWindow():
break
self.updateDisplay()
def focus(self, region):
"""Bring the specified region into focus.
The region is placed at the start of the display.
However, if the region has not set L{Region.focusToHardLeft} and there is extra space at the end of the display, the display is scrolled left so that as much as possible is displayed.
@param region: The region to focus.
@type region: L{Region}
"""
pos = self.regionPosToBufferPos(region, 0)
self.windowStartPos = pos
if region.focusToHardLeft or config.conf["braille"]["focusContextPresentation"]==CONTEXTPRES_SCROLL:
return
end = self.windowEndPos
if end - pos < self.handler.displaySize:
# We can fit more on the display while still keeping pos visible.
# Force windowStartPos to be recalculated based on windowEndPos.
self.windowEndPos = end
def update(self):
self.rawText = ""
self.brailleCells = []
self.cursorPos = None
start = 0
if log.isEnabledFor(log.IO):
logRegions = []
for region in self.visibleRegions:
rawText = region.rawText
if log.isEnabledFor(log.IO):
logRegions.append(rawText)
cells = region.brailleCells
self.rawText+=rawText
self.brailleCells.extend(cells)
if region.brailleCursorPos is not None:
self.cursorPos = start + region.brailleCursorPos
start += len(cells)
if log.isEnabledFor(log.IO):
log.io("Braille regions text: %r" % logRegions)
def updateDisplay(self):
if self is self.handler.buffer:
self.handler.update()
def _get_cursorWindowPos(self):
if self.cursorPos is None:
return None
try:
return self.bufferPosToWindowPos(self.cursorPos)
except LookupError:
return None
def _get_windowRawText(self):
return self.bufferPositionsToRawText(self.windowStartPos,self.windowEndPos)
def _get_windowBrailleCells(self):
return self.brailleCells[self.windowStartPos:self.windowEndPos]
def routeTo(self, windowPos):
pos = self.windowStartPos + windowPos
if pos >= self.windowEndPos:
return
region, pos = self.bufferPosToRegionPos(pos)
region.routeTo(pos)
def getTextInfoForWindowPos(self, windowPos):
pos = self.windowStartPos + windowPos
if pos >= self.windowEndPos:
return None
region, pos = self.bufferPosToRegionPos(pos)
if not isinstance(region, TextInfoRegion):
return None
return region.getTextInfoForBraillePos(pos)
def saveWindow(self):
"""Save the current window so that it can be restored after the buffer is updated.
The window start position is saved as a position relative to a region.
This allows it to be restored even after other regions are added, removed or updated.
It can be restored with L{restoreWindow}.
@postcondition: The window is saved and can be restored with L{restoreWindow}.
"""
self._savedWindow = self.bufferPosToRegionPos(self.windowStartPos)
def restoreWindow(self):
"""Restore the window saved by L{saveWindow}.
@precondition: L{saveWindow} has been called.
@postcondition: If the saved position is valid, the window is restored.
Otherwise, the nearest position is restored.
"""
region, pos = self._savedWindow
self.windowStartPos = self.regionPosToBufferPos(region, pos, allowNearest=True)
_cachedFocusAncestorsEnd = 0
def invalidateCachedFocusAncestors(index):
"""Invalidate cached focus ancestors from a given index.
This will cause regions to be generated for the focus ancestors >= index next time L{getFocusContextRegions} is called,
rather than using cached regions for those ancestors.
@param index: The index from which cached focus ancestors should be invalidated.
@type index: int
"""
global _cachedFocusAncestorsEnd
# There could be multiple calls to this function before getFocusContextRegions() is called.
_cachedFocusAncestorsEnd = min(_cachedFocusAncestorsEnd, index)
def getFocusContextRegions(obj, oldFocusRegions=None):
global _cachedFocusAncestorsEnd
# Late import to avoid circular import.
from treeInterceptorHandler import TreeInterceptor
ancestors = api.getFocusAncestors()
ancestorsEnd = len(ancestors)
if isinstance(obj, TreeInterceptor):
obj = obj.rootNVDAObject
# We only want the ancestors of the buffer's root NVDAObject.
if obj != api.getFocusObject():
# Search backwards through the focus ancestors to find the index of obj.
for index, ancestor in itertools.izip(xrange(len(ancestors) - 1, 0, -1), reversed(ancestors)):
if obj == ancestor:
ancestorsEnd = index
break
if oldFocusRegions:
# We have the regions from the previous focus, so use them as a cache to avoid rebuilding regions which are the same.
# We need to generate new regions from _cachedFocusAncestorsEnd onwards.
# However, we must ensure that it is not beyond the last ancestor we wish to consider.
# Also, we don't ever want to fetch ancestor 0 (the desktop).
newAncestorsStart = max(min(_cachedFocusAncestorsEnd, ancestorsEnd), 1)
# Search backwards through the old regions to find the last common region.
for index, region in itertools.izip(xrange(len(oldFocusRegions) - 1, -1, -1), reversed(oldFocusRegions)):
ancestorIndex = getattr(region, "_focusAncestorIndex", None)
if ancestorIndex is None:
continue
if ancestorIndex < newAncestorsStart:
# This is the last common region.
# An ancestor may have been skipped and not have a region, which means that we need to grab new ancestors from this point.
newAncestorsStart = ancestorIndex + 1
commonRegionsEnd = index + 1
break
else:
# No common regions were found.
commonRegionsEnd = 0
newAncestorsStart = 1
# Yield the common regions.
for region in oldFocusRegions[0:commonRegionsEnd]:
# We are setting focusToHardLeft to False for every cached region.
# This is necessary as BrailleHandler._doNewObject checks focusToHardLeft on every region
# and sets it to True for the first focus region if the context didn't change.
# If we don't do this, BrailleHandler._doNewObject can't set focusToHardLeft properly.
region.focusToHardLeft = False
yield region
else:
# Fetch all ancestors.
newAncestorsStart = 1
focusToHardLeftSet = False
for index, parent in enumerate(ancestors[newAncestorsStart:ancestorsEnd], newAncestorsStart):
if not parent.isPresentableFocusAncestor:
continue
region = NVDAObjectRegion(parent, appendText=TEXT_SEPARATOR)
region._focusAncestorIndex = index
if config.conf["braille"]["focusContextPresentation"]==CONTEXTPRES_CHANGEDCONTEXT and not focusToHardLeftSet:
# We are presenting context changes to the user
# Thus, only scroll back as far as the start of the first new focus ancestor
# focusToHardLeftSet is used since the first new ancestor isn't always represented by a region
region.focusToHardLeft = True
focusToHardLeftSet = True
region.update()
yield region
_cachedFocusAncestorsEnd = ancestorsEnd
def getFocusRegions(obj, review=False):
# Allow objects to override normal behaviour.
try:
regions = obj.getBrailleRegions(review=review)
except (AttributeError, NotImplementedError):
pass
else:
for region in regions:
region.update()
yield region
return
# Late import to avoid circular import.
from treeInterceptorHandler import TreeInterceptor, DocumentTreeInterceptor
from cursorManager import CursorManager
from NVDAObjects import NVDAObject
if isinstance(obj, CursorManager):
region2 = (ReviewTextInfoRegion if review else CursorManagerRegion)(obj)
elif isinstance(obj, DocumentTreeInterceptor) or (isinstance(obj,NVDAObject) and NVDAObjectHasUsefulText(obj)):
region2 = (ReviewTextInfoRegion if review else TextInfoRegion)(obj)
else:
region2 = None
if isinstance(obj, TreeInterceptor):
obj = obj.rootNVDAObject
region = NVDAObjectRegion(obj, appendText=TEXT_SEPARATOR if region2 else "")
region.update()
yield region
if region2:
region2.update()
yield region2
def formatCellsForLog(cells):
"""Formats a sequence of braille cells so that it is suitable for logging.
The output contains the dot numbers for each cell, with each cell separated by a space.
A C{-} indicates an empty cell.
@param cells: The cells to format.
@type cells: sequence of int
@return: The formatted cells.
@rtype: str
"""
# optimisation: This gets called a lot, so needs to be as efficient as possible.
# List comprehensions without function calls are faster than loops.
# For str.join, list comprehensions are faster than generator comprehensions.
return TEXT_SEPARATOR.join([
"".join([str(dot + 1) for dot in xrange(8) if cell & (1 << dot)])
if cell else "-"
for cell in cells])
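# A minimal illustration of the format above (values are illustrative; TEXT_SEPARATOR
# is assumed to be a single space, as used for separators elsewhere in this module):
#   formatCellsForLog([0b00000101, 0, 0b11000000])
# would produce "13 - 78": dots 1 and 3, an empty cell, then dots 7 and 8.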
class BrailleHandler(baseObject.AutoPropertyObject):
TETHER_AUTO = "auto"
TETHER_FOCUS = "focus"
TETHER_REVIEW = "review"
tetherValues=[
# Translators: The label for a braille setting indicating that braille should be
# tethered to focus or review cursor automatically.
(TETHER_AUTO,_("automatically")),
# Translators: The label for a braille setting indicating that braille should be tethered to focus.
(TETHER_FOCUS,_("to focus")),
# Translators: The label for a braille setting indicating that braille should be tethered to the review cursor.
(TETHER_REVIEW,_("to review"))
]
def __init__(self):
louisHelper.initialize()
self.display = None
self.displaySize = 0
self.mainBuffer = BrailleBuffer(self)
self.messageBuffer = BrailleBuffer(self)
self._messageCallLater = None
self.buffer = self.mainBuffer
#: Whether braille is enabled.
#: @type: bool
self.enabled = False
self._keyCountForLastMessage=0
self._cursorPos = None
self._cursorBlinkUp = True
self._cells = []
self._cursorBlinkTimer = None
config.post_configProfileSwitch.register(self.handlePostConfigProfileSwitch)
self._tether = config.conf["braille"]["tetherTo"]
self._detectionEnabled = False
self._detector = None
def terminate(self):
bgThreadStopTimeout = 2.5 if self._detectionEnabled else None
self._disableDetection()
if self._messageCallLater:
self._messageCallLater.Stop()
self._messageCallLater = None
if self._cursorBlinkTimer:
self._cursorBlinkTimer.Stop()
self._cursorBlinkTimer = None
config.post_configProfileSwitch.unregister(self.handlePostConfigProfileSwitch)
if self.display:
self.display.terminate()
self.display = None
_BgThread.stop(timeout=bgThreadStopTimeout)
louisHelper.terminate()
def getTether(self):
return self._tether
def _get_tether(self):
"""@deprecated: Use L{getTether instead."""
return self.getTether()
def setTether(self, tether, auto=False):
if auto and not self.shouldAutoTether:
return
if not auto:
config.conf["braille"]["tetherTo"] = tether
if tether == self._tether:
return
self._tether = tether
self.mainBuffer.clear()
def _set_tether(self, tether):
"""@deprecated: Use L{setTether instead."""
self.setTether(tether, auto=False)
def _get_shouldAutoTether(self):
return self.enabled and config.conf["braille"]["autoTether"]
_lastRequestedDisplayName=None #: the name of the last requested braille display driver with setDisplayByName, even if it failed and has fallen back to no braille.
def setDisplayByName(self, name, isFallback=False, detected=None):
if not isFallback:
# #8032: Take note of the display requested, even if it is going to fail.
self._lastRequestedDisplayName=name
if name == AUTO_DISPLAY_NAME:
self._enableDetection()
return True
elif not isFallback and not detected:
self._disableDetection()
kwargs = {}
if detected:
kwargs["port"]=detected
else:
# See if the user has defined a specific port to connect to
if name not in config.conf["braille"]:
# No port was set.
config.conf["braille"][name] = {"port" : ""}
port = config.conf["braille"][name].get("port")
# Here we try to stay compatible with old drivers that don't support the port setting,
# as well as with situations where the user hasn't set any port.
if port:
kwargs["port"] = port
try:
newDisplay = _getDisplayDriver(name)
if detected and bdDetect._isDebug():
log.debug("Possibly detected display '%s'" % newDisplay.description)
if newDisplay == self.display.__class__:
# This is the same driver as was already set, so just re-initialise it.
log.debug("Reinitializing %s braille display"%name)
self.display.terminate()
newDisplay = self.display
try:
newDisplay.__init__(**kwargs)
except TypeError:
# Re-initialize with supported kwargs.
extensionPoints.callWithSupportedKwargs(newDisplay.__init__, **kwargs)
else:
if newDisplay.isThreadSafe and not detected:
# Start the thread if it wasn't already.
# Auto detection implies the thread is already started.
_BgThread.start()
try:
newDisplay = newDisplay(**kwargs)
except TypeError:
newDisplay = newDisplay.__new__(newDisplay)
# initialize with supported kwargs.
extensionPoints.callWithSupportedKwargs(newDisplay.__init__, **kwargs)
if self.display:
log.debug("Switching braille display from %s to %s"%(self.display.name,name))
try:
self.display.terminate()
except:
log.error("Error terminating previous display driver", exc_info=True)
self.display = newDisplay
self.displaySize = newDisplay.numCells
self.enabled = bool(self.displaySize)
if isFallback:
self._resumeDetection()
elif not detected:
config.conf["braille"]["display"] = name
else: # detected:
self._disableDetection()
log.info("Loaded braille display driver %s, current display has %d cells." %(name, self.displaySize))
self.initialDisplay()
return True
except:
# For auto display detection, logging an error for every failure is too obnoxious.
if not detected:
log.error("Error initializing display driver for kwargs %r"%kwargs, exc_info=True)
elif bdDetect._isDebug():
log.debugWarning("Couldn't initialize display driver for kwargs %r"%(kwargs,), exc_info=True)
self.setDisplayByName("noBraille", isFallback=True)
return False
def _updateDisplay(self):
if self._cursorBlinkTimer:
self._cursorBlinkTimer.Stop()
self._cursorBlinkTimer = None
self._cursorBlinkUp = showCursor = config.conf["braille"]["showCursor"]
self._displayWithCursor()
if self._cursorPos is None or not showCursor:
return
cursorShouldBlink = config.conf["braille"]["cursorBlink"]
blinkRate = config.conf["braille"]["cursorBlinkRate"]
if cursorShouldBlink and blinkRate:
self._cursorBlinkTimer = gui.NonReEntrantTimer(self._blink)
# This is called from the background thread when a display is auto detected.
# Make sure we start the blink timer from the main thread to avoid wx assertions
wx.CallAfter(self._cursorBlinkTimer.Start,blinkRate)
def _writeCells(self, cells):
if not self.display.isThreadSafe:
try:
self.display.display(cells)
except:
log.error("Error displaying cells. Disabling display", exc_info=True)
self.handleDisplayUnavailable()
return
with _BgThread.queuedWriteLock:
alreadyQueued = _BgThread.queuedWrite
_BgThread.queuedWrite = cells
# If a write was already queued, we don't need to queue another;
# we just replace the data.
# This means that if multiple writes occur while an earlier write is still in progress,
# we skip all but the last.
if not alreadyQueued and not self.display._awaitingAck:
# Queue a call to the background thread.
_BgThread.queueApc(_BgThread.executor)
def _displayWithCursor(self):
if not self._cells:
return
cells = list(self._cells)
if self._cursorPos is not None and self._cursorBlinkUp:
if self.tether == self.TETHER_FOCUS:
cells[self._cursorPos] |= config.conf["braille"]["cursorShapeFocus"]
else:
cells[self._cursorPos] |= config.conf["braille"]["cursorShapeReview"]
self._writeCells(cells)
def _blink(self):
self._cursorBlinkUp = not self._cursorBlinkUp
self._displayWithCursor()
def update(self):
cells = self.buffer.windowBrailleCells
if log.isEnabledFor(log.IO):
log.io("Braille window dots: %s" % formatCellsForLog(cells))
# cells might not be the full length of the display.
# Therefore, pad it with spaces to fill the display.
self._cells = cells + [0] * (self.displaySize - len(cells))
self._cursorPos = self.buffer.cursorWindowPos
self._updateDisplay()
def scrollForward(self):
self.buffer.scrollForward()
if self.buffer is self.messageBuffer:
self._resetMessageTimer()
def scrollBack(self):
self.buffer.scrollBack()
if self.buffer is self.messageBuffer:
self._resetMessageTimer()
def routeTo(self, windowPos):
self.buffer.routeTo(windowPos)
if self.buffer is self.messageBuffer:
self._dismissMessage()
def getTextInfoForWindowPos(self, windowPos):
if self.buffer is not self.mainBuffer:
return None
return self.buffer.getTextInfoForWindowPos(windowPos)
def message(self, text):
"""Display a message to the user which times out after a configured interval.
The timeout will be reset if the user scrolls the display.
The message will be dismissed immediately if the user presses a cursor routing key.
If a key is pressed the message will be dismissed by the next text being written to the display.
@postcondition: The message is displayed.
"""
if not self.enabled or config.conf["braille"]["messageTimeout"] == 0 or text is None:
return
if self.buffer is self.messageBuffer:
self.buffer.clear()
else:
self.buffer = self.messageBuffer
region = TextRegion(text)
region.update()
self.buffer.regions.append(region)
self.buffer.update()
self.update()
self._resetMessageTimer()
self._keyCountForLastMessage=keyboardHandler.keyCounter
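# Hedged usage sketch (not part of the original module): other NVDA components
# typically show transient braille messages through the module-level handler, e.g.
#   import braille
#   braille.handler.message(_("Bookmark saved"))
# The text is placed in the message buffer and times out according to the
# configured "messageTimeout" setting, as described in the docstring above.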
def _resetMessageTimer(self):
"""Reset the message timeout.
@precondition: A message is currently being displayed.
"""
if config.conf["braille"]["noMessageTimeout"]:
return
# Configured timeout is in seconds.
timeout = config.conf["braille"]["messageTimeout"] * 1000
if self._messageCallLater:
self._messageCallLater.Restart(timeout)
else:
self._messageCallLater = wx.CallLater(timeout, self._dismissMessage)
def _dismissMessage(self):
"""Dismiss the current message.
@precondition: A message is currently being displayed.
@postcondition: The display returns to the main buffer.
"""
self.buffer.clear()
self.buffer = self.mainBuffer
if self._messageCallLater:
self._messageCallLater.Stop()
self._messageCallLater = None
self.update()
def handleGainFocus(self, obj, shouldAutoTether=True):
if not self.enabled:
return
if shouldAutoTether:
self.setTether(self.TETHER_FOCUS, auto=True)
if self._tether != self.TETHER_FOCUS:
return
if getattr(obj, "treeInterceptor", None) and not obj.treeInterceptor.passThrough:
obj = obj.treeInterceptor
self._doNewObject(itertools.chain(getFocusContextRegions(obj, oldFocusRegions=self.mainBuffer.regions), getFocusRegions(obj)))
def _doNewObject(self, regions):
self.mainBuffer.clear()
focusToHardLeftSet = False
for region in regions:
if self.tether == self.TETHER_FOCUS and config.conf["braille"]["focusContextPresentation"]==CONTEXTPRES_CHANGEDCONTEXT:
# Check focusToHardLeft for every region.
# If none of the regions has focusToHardLeft set to True, set it for the first focus region.
if region.focusToHardLeft:
focusToHardLeftSet = True
elif not focusToHardLeftSet and getattr(region, "_focusAncestorIndex", None) is None:
# Going to display a new object with the same ancestry as the previously displayed object.
# So, set focusToHardLeft on this region
# For example, this applies when you are in a list and start navigating through it
region.focusToHardLeft = True
focusToHardLeftSet = True
self.mainBuffer.regions.append(region)
self.mainBuffer.update()
# Last region should receive focus.
self.mainBuffer.focus(region)
self.scrollToCursorOrSelection(region)
if self.buffer is self.mainBuffer:
self.update()
elif self.buffer is self.messageBuffer and keyboardHandler.keyCounter>self._keyCountForLastMessage:
self._dismissMessage()
def handleCaretMove(self, obj, shouldAutoTether=True):
if not self.enabled:
return
prevTether = self._tether
if shouldAutoTether:
self.setTether(self.TETHER_FOCUS, auto=True)
if self._tether != self.TETHER_FOCUS:
return
region = self.mainBuffer.regions[-1] if self.mainBuffer.regions else None
if region and region.obj==obj:
region.pendingCaretUpdate=True
elif prevTether == self.TETHER_REVIEW:
# The caret moved in a different object than the review position.
self._doNewObject(getFocusRegions(obj, review=False))
def handlePendingCaretUpdate(self):
"""Checks to see if the final text region needs its caret updated and if so calls _doCursorMove for the region."""
region=self.mainBuffer.regions[-1] if self.mainBuffer.regions else None
if isinstance(region,TextInfoRegion) and region.pendingCaretUpdate:
try:
self._doCursorMove(region)
finally:
region.pendingCaretUpdate=False
def _doCursorMove(self, region):
self.mainBuffer.saveWindow()
region.update()
self.mainBuffer.update()
self.mainBuffer.restoreWindow()
self.scrollToCursorOrSelection(region)
if self.buffer is self.mainBuffer:
self.update()
elif self.buffer is self.messageBuffer and keyboardHandler.keyCounter>self._keyCountForLastMessage:
self._dismissMessage()
def scrollToCursorOrSelection(self, region):
if region.brailleCursorPos is not None:
self.mainBuffer.scrollTo(region, region.brailleCursorPos)
elif not isinstance(region, TextInfoRegion) or not region.obj.isTextSelectionAnchoredAtStart:
# It is unknown where the selection is anchored, or it is anchored at the end.
if region.brailleSelectionStart is not None:
self.mainBuffer.scrollTo(region, region.brailleSelectionStart)
elif region.brailleSelectionEnd is not None:
# The selection is anchored at the start.
self.mainBuffer.scrollTo(region, region.brailleSelectionEnd - 1)
# #6862: A value change of a progress bar often goes together with changes to other objects in the dialog,
# e.g. the time remaining. Therefore, update the dialog when a contained progress bar changes.
def _handleProgressBarUpdate(self, obj):
oldTime = getattr(self, "_lastProgressBarUpdateTime", None)
newTime = time.time()
if oldTime and newTime - oldTime < 1:
# Fetching dialog text is expensive, so update at most once a second.
return
self._lastProgressBarUpdateTime = newTime
for obj in reversed(api.getFocusAncestors()[:-1]):
if obj.role == controlTypes.ROLE_DIALOG:
self.handleUpdate(obj)
return
def handleUpdate(self, obj):
if not self.enabled:
return
# Optimisation: It is very likely that it is the focus object that is being updated.
# If the focus object is in the braille buffer, it will be the last region, so scan the regions backwards.
for region in reversed(list(self.mainBuffer.visibleRegions)):
if hasattr(region, "obj") and region.obj == obj:
break
else:
# No region for this object.
# There are some objects that require special update behavior even if they have no region.
# This only applies when tethered to focus, because tethering to review shows only one object at a time,
# which always has a braille region associated with it.
if self._tether != self.TETHER_FOCUS:
return
# Late import to avoid circular import.
from NVDAObjects import NVDAObject
if isinstance(obj, NVDAObject) and obj.role == controlTypes.ROLE_PROGRESSBAR and obj.isInForeground:
self._handleProgressBarUpdate(obj)
return
self.mainBuffer.saveWindow()
region.update()
self.mainBuffer.update()
self.mainBuffer.restoreWindow()
if self.buffer is self.mainBuffer:
self.update()
elif self.buffer is self.messageBuffer and keyboardHandler.keyCounter>self._keyCountForLastMessage:
self._dismissMessage()
def handleReviewMove(self, shouldAutoTether=True):
if not self.enabled:
return
reviewPos = api.getReviewPosition()
if shouldAutoTether:
self.setTether(self.TETHER_REVIEW, auto=True)
if self._tether != self.TETHER_REVIEW:
return
region = self.mainBuffer.regions[-1] if self.mainBuffer.regions else None
if region and region.obj == reviewPos.obj:
self._doCursorMove(region)
else:
# We're reviewing a different object.
self._doNewObject(getFocusRegions(reviewPos.obj, review=True))
def initialDisplay(self):
if not self.enabled or not api.getDesktopObject():
# Braille is disabled or focus/review hasn't yet been initialised.
return
if self.tether == self.TETHER_FOCUS:
self.handleGainFocus(api.getFocusObject(), shouldAutoTether=False)
else:
self.handleReviewMove(shouldAutoTether=False)
def handlePostConfigProfileSwitch(self):
display = config.conf["braille"]["display"]
# Do not choose a new display if:
if not (
# The display in the new profile is equal to the last requested display name
display == self._lastRequestedDisplayName
# or the new profile uses auto detection, which supports detection of the currently active display.
or (display == AUTO_DISPLAY_NAME and bdDetect.driverSupportsAutoDetection(self.display.name))
):
self.setDisplayByName(display)
self._tether = config.conf["braille"]["tetherTo"]
def handleDisplayUnavailable(self):
"""Called when the braille display becomes unavailable.
This logs an error and disables the display.
This is called when displaying cells raises an exception,
but drivers can also call it themselves if appropriate.
"""
log.error("Braille display unavailable. Disabling", exc_info=True)
self._detectionEnabled = config.conf["braille"]["display"] == AUTO_DISPLAY_NAME
self.setDisplayByName("noBraille", isFallback=True)
def _enableDetection(self):
"""Enables automatic detection of braille displays.
When auto detection is already active, this will force a rescan for devices.
"""
if self._detectionEnabled and self._detector:
self._detector.rescan()
return
_BgThread.start()
config.conf["braille"]["display"] = AUTO_DISPLAY_NAME
self.setDisplayByName("noBraille", isFallback=True)
self._detector = bdDetect.Detector()
self._detectionEnabled = True
def _disableDetection(self):
"""Disables automatic detection of braille displays."""
if not self._detectionEnabled:
return
if self._detector:
self._detector.terminate()
self._detector = None
self._detectionEnabled = False
def _resumeDetection(self):
"""Resumes automatic detection of braille displays.
This is executed when auto detection should be resumed due to loss of display connectivity.
"""
if not self._detectionEnabled or self._detector:
return
self._detector = bdDetect.Detector()
class _BgThread:
"""A singleton background thread used for background writes and raw braille display I/O.
"""
thread = None
exit = False
queuedWrite = None
@classmethod
def start(cls):
if cls.thread:
return
cls.queuedWriteLock = threading.Lock()
thread = cls.thread = threading.Thread(target=cls.func)
thread.daemon = True
thread.start()
cls.handle = ctypes.windll.kernel32.OpenThread(winKernel.THREAD_SET_CONTEXT, False, thread.ident)
cls.ackTimerHandle = winKernel.createWaitableTimer()
@classmethod
def queueApc(cls, func, param=0):
ctypes.windll.kernel32.QueueUserAPC(func, cls.handle, param)
@classmethod
def stop(cls, timeout=None):
if not cls.thread:
return
cls.exit = True
if not ctypes.windll.kernel32.CancelWaitableTimer(cls.ackTimerHandle):
raise ctypes.WinError()
winKernel.closeHandle(cls.ackTimerHandle)
cls.ackTimerHandle = None
# Wake up the thread. It will exit when it sees exit is True.
cls.queueApc(cls.executor)
cls.thread.join(timeout)
cls.exit = False
winKernel.closeHandle(cls.handle)
cls.handle = None
cls.thread = None
@winKernel.PAPCFUNC
def executor(param):
if _BgThread.exit:
# func will see this and exit.
return
if not handler.display:
# Sometimes, the executor is triggered when a display is not fully initialized.
# For example, this happens when handling an ACK during initialisation.
# We can safely ignore this.
return
if handler.display._awaitingAck:
# Do not write cells when we are awaiting an ACK
return
with _BgThread.queuedWriteLock:
data = _BgThread.queuedWrite
_BgThread.queuedWrite = None
if not data:
return
try:
handler.display.display(data)
except:
log.error("Error displaying cells. Disabling display", exc_info=True)
handler.handleDisplayUnavailable()
else:
if handler.display.receivesAckPackets:
handler.display._awaitingAck = True
winKernel.setWaitableTimer(
_BgThread.ackTimerHandle,
int(handler.display.timeout*2000),
0,
_BgThread.ackTimeoutResetter
)
@winKernel.PAPCFUNC
def ackTimeoutResetter(param):
if handler.display.receivesAckPackets and handler.display._awaitingAck:
log.debugWarning("Waiting for %s ACK packet timed out"%handler.display.name)
handler.display._awaitingAck = False
_BgThread.queueApc(_BgThread.executor)
@classmethod
def func(cls):
while True:
ctypes.windll.kernel32.SleepEx(winKernel.INFINITE, True)
if cls.exit:
break
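# Note on the mechanism used by _BgThread (descriptive comment, added for clarity):
# func() sleeps alertably (SleepEx with bAlertable=True), so the thread only wakes when
# an APC queued via QueueUserAPC (see queueApc) is delivered to it. executor and
# ackTimeoutResetter therefore always run in the context of this background thread,
# which keeps raw display I/O off the main thread for thread-safe drivers.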
#: Maps old braille display driver names to new drivers that supersede old drivers.
RENAMED_DRIVERS = {
"syncBraille":"hims",
"alvaBC6":"alva"
}
def initialize():
global handler
config.addConfigDirsToPythonPackagePath(brailleDisplayDrivers)
log.info("Using liblouis version %s" % louis.version())
# #6140: Migrate to new table names as smoothly as possible.
oldTableName = config.conf["braille"]["translationTable"]
newTableName = brailleTables.RENAMED_TABLES.get(oldTableName)
if newTableName:
config.conf["braille"]["translationTable"] = newTableName
handler = BrailleHandler()
# #7459: the syncBraille has been dropped in favor of the native hims driver.
# Migrate to renamed drivers as smoothly as possible.
oldDriverName = config.conf["braille"]["display"]
newDriverName = RENAMED_DRIVERS.get(oldDriverName)
if newDriverName:
config.conf["braille"]["display"] = newDriverName
handler.setDisplayByName(config.conf["braille"]["display"])
def pumpAll():
"""Runs tasks at the end of each core cycle. For now just caret updates."""
handler.handlePendingCaretUpdate()
def terminate():
global handler
handler.terminate()
handler = None
class BrailleDisplayDriver(baseObject.AutoPropertyObject):
"""Abstract base braille display driver.
Each braille display driver should be a separate Python module in the root brailleDisplayDrivers directory containing a BrailleDisplayDriver class which inherits from this base class.
At a minimum, drivers must set L{name} and L{description} and override the L{check} method.
To display braille, L{numCells} and L{display} must be implemented.
Drivers should dispatch input such as presses of buttons, wheels or other controls using the L{inputCore} framework.
They should subclass L{BrailleDisplayGesture} and execute instances of those gestures using L{inputCore.manager.executeGesture}.
These gestures can be mapped in L{gestureMap}.
A driver can also inherit L{baseObject.ScriptableObject} to provide display specific scripts.
@see: L{hwIo} for raw serial and HID I/O.
"""
#: The name of the braille display; must be the original module file name.
#: @type: str
name = ""
#: A description of the braille display.
#: @type: str
description = ""
#: Whether this driver is thread-safe.
#: If it is, NVDA may initialize, terminate or call this driver on any thread.
#: This allows NVDA to read from and write to the display in the background,
#: which means the rest of NVDA is not blocked while this occurs,
#: thus resulting in better performance.
#: This is also required to use the L{hwIo} module.
#: @type: bool
isThreadSafe = False
#: Whether displays for this driver return acknowledgements for sent packets.
#: L{_handleAck} should be called when an ACK is received.
#: Note that thread safety is required for the generic implementation to function properly.
#: If a display is not thread safe, a driver should manually implement ACK processing.
#: @type: bool
receivesAckPackets = False
#: Whether this driver is awaiting an Ack for a connected display.
#: This is set to C{True} after displaying cells when L{receivesAckPackets} is True,
#: and set to C{False} by L{_handleAck} or when C{timeout} has elapsed.
#: This is for internal use by NVDA core code only and shouldn't be touched by a driver itself.
_awaitingAck = False
#: Maximum timeout to use for communication with a device (in seconds).
#: This can be used for serial connections.
#: Furthermore, it is used by L{_BgThread} to stop waiting for missed acknowledgement packets.
#: @type: float
timeout = 0.2
@classmethod
def check(cls):
"""Determine whether this braille display is available.
The display will be excluded from the list of available displays if this method returns C{False}.
For example, if this display is not present, C{False} should be returned.
@return: C{True} if this display is available, C{False} if not.
@rtype: bool
"""
if cls.isThreadSafe:
if bdDetect.driverHasPossibleDevices(cls.name):
return True
try:
next(cls.getManualPorts())
except (StopIteration, NotImplementedError):
pass
else:
return True
return False
def terminate(self):
"""Terminate this display driver.
This will be called when NVDA is finished with this display driver.
It should close any open connections, perform cleanup, etc.
Subclasses should call the superclass method first.
@postcondition: This instance can no longer be used unless it is constructed again.
"""
# Clear the display.
try:
self.display([0] * self.numCells)
except:
# The display driver seems to be failing, but we're terminating anyway, so just ignore it.
pass
def _get_numCells(self):
"""Obtain the number of braille cells on this display.
@note: 0 indicates that braille should be disabled.
@return: The number of cells.
@rtype: int
"""
return 0
def display(self, cells):
"""Display the given braille cells.
@param cells: The braille cells to display.
@type cells: [int, ...]
"""
#: Automatic port constant to be used by braille displays that support the "automatic" port
#: Kept for backwards compatibility
AUTOMATIC_PORT = AUTOMATIC_PORT
@classmethod
def getPossiblePorts(cls):
""" Returns possible hardware ports for this driver.
Generally, drivers shouldn't implement this method directly.
Instead, they should provide automatic detection data via L{bdDetect}
and implement L{getPossibleManualPorts} if they support manual ports
such as serial ports.
@return: ordered dictionary of name : description for each port
@rtype: OrderedDict
"""
try:
next(bdDetect.getConnectedUsbDevicesForDriver(cls.name))
usb = True
except (LookupError, StopIteration):
usb = False
try:
next(bdDetect.getPossibleBluetoothDevicesForDriver(cls.name))
bluetooth = True
except (LookupError, StopIteration):
bluetooth = False
ports = collections.OrderedDict()
if usb or bluetooth:
ports.update((AUTOMATIC_PORT,))
if usb:
ports.update((USB_PORT,))
if bluetooth:
ports.update((BLUETOOTH_PORT,))
try:
ports.update(cls.getManualPorts())
except NotImplementedError:
pass
return ports
@classmethod
def _getAutoPorts(cls, usb=True, bluetooth=True):
"""Returns possible ports to connect to using L{bdDetect} automatic detection data.
@param usb: Whether to search for USB devices.
@type usb: bool
@param bluetooth: Whether to search for bluetooth devices.
@type bluetooth: bool
@return: The device match for each port.
@rtype: iterable of L{DeviceMatch}
"""
iters = []
if usb:
iters.append(bdDetect.getConnectedUsbDevicesForDriver(cls.name))
if bluetooth:
iters.append(bdDetect.getPossibleBluetoothDevicesForDriver(cls.name))
try:
for match in itertools.chain(*iters):
yield match
except LookupError:
pass
@classmethod
def getManualPorts(cls):
"""Get possible manual hardware ports for this driver.
This is for ports which cannot be detected automatically
such as serial ports.
@return: The name and description for each port.
@rtype: iterable of basestring, basestring
"""
raise NotImplementedError
@classmethod
def _getTryPorts(cls, port):
"""Returns the ports for this driver to which a connection attempt should be made.
This generator function is usually used in L{__init__} to connect to the desired display.
@param port: the port to connect to.
@type port: one of basestring or L{bdDetect.DeviceMatch}
@return: The name and description for each port.
@rtype: iterable of basestring, basestring
"""
if isinstance(port, bdDetect.DeviceMatch):
yield port
elif isinstance(port, basestring):
isUsb = port in (AUTOMATIC_PORT[0], USB_PORT[0])
isBluetooth = port in (AUTOMATIC_PORT[0], BLUETOOTH_PORT[0])
if not isUsb and not isBluetooth:
# Assume we are connecting to a com port, since these are the only manual ports supported.
try:
portInfo = next(info for info in hwPortUtils.listComPorts() if info["port"]==port)
except StopIteration:
pass
else:
if "bluetoothName" in portInfo:
yield bdDetect.DeviceMatch(bdDetect.KEY_SERIAL, portInfo["bluetoothName"], portInfo["port"], portInfo)
else:
yield bdDetect.DeviceMatch(bdDetect.KEY_SERIAL, portInfo["friendlyName"], portInfo["port"], portInfo)
else:
for match in cls._getAutoPorts(usb=isUsb, bluetooth=isBluetooth):
yield match
#: Global input gesture map for this display driver.
#: @type: L{inputCore.GlobalGestureMap}
gestureMap = None
@classmethod
def _getModifierGestures(cls, model=None):
"""Retrieves modifier gestures from this display driver's L{gestureMap}
that are bound to modifier-only keyboard emulation scripts.
@param model: the optional braille display model for which modifier gestures should also be included.
@type model: str; C{None} if model specific gestures should not be included
@return: the ids of the display keys and the associated generalised modifier names
@rtype: generator of (set, set)
"""
import globalCommands
# Ignore the locale gesture map when searching for braille display gestures
globalMaps = [inputCore.manager.userGestureMap]
if cls.gestureMap:
globalMaps.append(cls.gestureMap)
prefixes=["br({source})".format(source=cls.name),]
if model:
prefixes.insert(0,"br({source}.{model})".format(source=cls.name, model=model))
for globalMap in globalMaps:
for scriptCls, gesture, scriptName in globalMap.getScriptsForAllGestures():
if (any(gesture.startswith(prefix.lower()) for prefix in prefixes)
and scriptCls is globalCommands.GlobalCommands
and scriptName and scriptName.startswith("kb")):
emuGesture = keyboardHandler.KeyboardInputGesture.fromName(scriptName.split(":")[1])
if emuGesture.isModifier:
yield set(gesture.split(":")[1].split("+")), set(emuGesture._keyNamesInDisplayOrder)
def _handleAck(self):
"""Base implementation to handle acknowledgement packets."""
if not self.receivesAckPackets:
raise NotImplementedError("This display driver does not support ACK packet handling")
if not ctypes.windll.kernel32.CancelWaitableTimer(_BgThread.ackTimerHandle):
raise ctypes.WinError()
self._awaitingAck = False
_BgThread.queueApc(_BgThread.executor)
class BrailleDisplayGesture(inputCore.InputGesture):
"""A button, wheel or other control pressed on a braille display.
Subclasses must provide L{source} and L{id}.
Optionally, L{model} can be provided to facilitate model specific gestures.
L{routingIndex} should be provided for routing buttons.
Subclasses can also inherit from L{brailleInput.BrailleInputGesture} if the display has a braille keyboard.
If the braille display driver is a L{baseObject.ScriptableObject}, it can provide scripts specific to input gestures from this display.
"""
def _get_source(self):
"""The string used to identify all gestures from this display.
This should generally be the driver name.
This string will be included in the source portion of gesture identifiers.
For example, if this was C{alvaBC6},
a display specific gesture identifier might be C{br(alvaBC6):etouch1}.
@rtype: str
"""
raise NotImplementedError
def _get_model(self):
"""The string used to identify all gestures from a specific braille display model.
This should be an alphanumeric short version of the model name, without spaces.
This string will be included in the source portion of gesture identifiers.
For example, if this was C{alvaBC6},
the model string could look like C{680},
and a corresponding display specific gesture identifier might be C{br(alvaBC6.680):etouch1}.
@rtype: str; C{None} if model specific gestures are not supported
"""
return None
def _get_id(self):
"""The unique, display specific id for this gesture.
@rtype: str
"""
raise NotImplementedError
#: The index of the routing key or C{None} if this is not a routing key.
#: @type: int
routingIndex = None
def _get_identifiers(self):
ids = [u"br({source}):{id}".format(source=self.source, id=self.id)]
if self.model:
# Model based ids should take priority.
ids.insert(0, u"br({source}.{model}):{id}".format(source=self.source, model=self.model, id=self.id))
import brailleInput
if isinstance(self, brailleInput.BrailleInputGesture):
ids.extend(brailleInput.BrailleInputGesture._get_identifiers(self))
return ids
def _get_displayName(self):
import brailleInput
if isinstance(self, brailleInput.BrailleInputGesture):
name = brailleInput.BrailleInputGesture._get_displayName(self)
if name:
return name
return self.id
def _get_scriptableObject(self):
display = handler.display
if isinstance(display, baseObject.ScriptableObject):
return display
return super(BrailleDisplayGesture, self).scriptableObject
def _get_script(self):
# Overrides L{inputCore.InputGesture._get_script} to support modifier keys.
# Also processes modifiers held by braille input.
# Import late to avoid circular import.
import brailleInput
gestureKeys = set(self.keyNames)
gestureModifiers = brailleInput.handler.currentModifiers.copy()
script=scriptHandler.findScript(self)
if script:
scriptName = script.__name__
if not (gestureModifiers and scriptName.startswith("script_kb:")):
self.script = script
return self.script
# Either no script for this gesture has been found, or braille input is holding modifiers.
# Process this gesture for possible modifiers if it consists of more than one key.
# For example, if L{self.id} is 'key1+key2',
# key1 is bound to 'kb:control' and key2 to 'kb:tab',
# this gesture should execute 'kb:control+tab'.
# Combining emulated modifiers with braille input (#7306) is not yet supported.
if len(gestureKeys)>1:
for keys, modifiers in handler.display._getModifierGestures(self.model):
if keys<gestureKeys:
gestureModifiers |= modifiers
gestureKeys -= keys
if not gestureModifiers:
return None
if gestureKeys != set(self.keyNames):
# Find a script for L{gestureKeys}.
id = "+".join(gestureKeys)
fakeGestureIds = [u"br({source}):{id}".format(source=self.source, id=id),]
if self.model:
fakeGestureIds.insert(0,u"br({source}.{model}):{id}".format(source=self.source, model=self.model, id=id))
scriptNames = []
globalMaps = [inputCore.manager.userGestureMap, handler.display.gestureMap]
for globalMap in globalMaps:
for fakeGestureId in fakeGestureIds:
scriptNames.extend(scriptName for cls, scriptName in globalMap.getScriptsForGesture(fakeGestureId.lower()) if scriptName and scriptName.startswith("kb"))
if not scriptNames:
# Gesture contains modifiers, but no keyboard emulate script exists for the gesture without modifiers
return None
# We don't bother handling multiple scripts for a gesture; we just use the first one.
combinedScriptName = "kb:{modifiers}+{keys}".format(
modifiers="+".join(gestureModifiers),
keys=scriptNames[0].split(":")[1]
)
elif script and scriptName:
combinedScriptName = "kb:{modifiers}+{keys}".format(
modifiers="+".join(gestureModifiers),
keys=scriptName.split(":")[1]
)
else:
return None
self.script = scriptHandler._makeKbEmulateScript(combinedScriptName)
brailleInput.handler.currentModifiers.clear()
return self.script
def _get_keyNames(self):
"""The names of the keys that are part of this gesture.
@rtype: list
"""
return self.id.split("+")
#: Compiled regular expression to match an identifier including an optional model name
#: The model name should be an alphanumeric string without spaces.
#: @type: RegexObject
ID_PARTS_REGEX = re.compile(r"br\((\w+)(?:\.(\w+))?\):([\w+]+)", re.U)
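# For example (illustrative), "br(alvaBC6.680):etouch1" matches with groups
# ("alvaBC6", "680", "etouch1"); for "br(alvaBC6):etouch1" the model group is None.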
@classmethod
def getDisplayTextForIdentifier(cls, identifier):
# Translators: Displayed when the source driver of a braille display gesture is unknown.
unknownDisplayDescription = _("Unknown braille display")
idParts = cls.ID_PARTS_REGEX.match(identifier)
if not idParts:
log.error("Invalid braille gesture identifier: %s"%identifier)
return unknownDisplayDescription, "malformed:%s"%identifier
source, modelName, key = idParts.groups()
# Optimisation: Do not try to get the braille display class if this identifier belongs to the current driver.
if handler.display.name.lower() == source.lower():
description = handler.display.description
else:
try:
description = _getDisplayDriver(source, caseSensitive=False).description
except ImportError:
description = unknownDisplayDescription
if modelName: # The identifier contains a model name
return description, "{modelName}: {key}".format(
modelName=modelName, key=key
)
else:
return description, key
inputCore.registerGestureSource("br", BrailleDisplayGesture)
def getSerialPorts(filterFunc=None):
"""Get available serial ports in a format suitable for L{BrailleDisplayDriver.getManualPorts}.
@param filterFunc: a function executed on every dictionary retrieved using L{hwPortUtils.listComPorts}.
For example, this can be used to filter by USB or Bluetooth com ports.
@type filterFunc: callable
"""
if filterFunc and not callable(filterFunc):
raise TypeError("The provided filterFunc is not callable")
for info in hwPortUtils.listComPorts():
if filterFunc and not filterFunc(info):
continue
if "bluetoothName" in info:
yield (info["port"],
# Translators: Name of a Bluetooth serial communications port.
_("Bluetooth Serial: {port} ({deviceName})").format(
port=info["port"],
deviceName=info["bluetoothName"]
))
else:
yield (info["port"],
# Translators: Name of a serial communications port.
_("Serial: {portName}").format(portName=info["friendlyName"]))
|
v2_serving.py
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import traceback
from typing import Dict
import mlrun
from mlrun.api.schemas import (
ModelEndpoint,
ModelEndpointMetadata,
ModelEndpointSpec,
ModelEndpointStatus,
)
from mlrun.artifacts import ModelArtifact # noqa: F401
from mlrun.config import config
from mlrun.utils import logger, now_date, parse_versioned_object_uri
class V2ModelServer:
"""base model serving class (v2), using similar API to KFServing v2 and Triton
The class is initialized automatically by the model server and can run locally
as part of a nuclio serverless function, or as part of a real-time pipeline
default model url is: /v2/models/<model>[/versions/<ver>]/operation
You need to implement two mandatory methods:
load() - download the model file(s) and load the model into memory
predict() - accept request payload and return prediction/inference results
you can override additional methods : preprocess, validate, postprocess, explain
you can add custom api endpoint by adding method op_xx(event), will be invoked by
calling the <model-url>/xx (operation = xx)
Example
-------
defining a class::
class MyClass(V2ModelServer):
def load(self):
# load and initialize the model and/or other elements
model_file, extra_data = self.get_model(suffix='.pkl')
self.model = load(open(model_file, "rb"))
def predict(self, request):
events = np.array(request['inputs'])
dmatrix = xgb.DMatrix(events)
result: xgb.DMatrix = self.model.predict(dmatrix)
return {"outputs": result.tolist()}
"""
def __init__(
self,
context,
name: str,
model_path: str = None,
model=None,
protocol=None,
**class_args,
):
self.name = name
self.version = ""
if ":" in name:
self.name, self.version = name.split(":", 1)
self.context = context
self.ready = False
self.error = ""
self.protocol = protocol or "v2"
self.model_path = model_path
self.model_spec: mlrun.artifacts.ModelArtifact = None
self._params = class_args
self._model_logger = (
_ModelLogPusher(self, context) if context.stream.enabled else None
)
self.metrics = {}
self.labels = {}
if model:
self.model = model
self.ready = True
def _load_and_update_state(self):
try:
self.load()
except Exception as exc:
self.error = exc
self.context.logger.error(traceback.format_exc())
raise RuntimeError(f"failed to load model {self.name}, {exc}")
self.ready = True
self.context.logger.info(f"model {self.name} was loaded")
def post_init(self, mode="sync"):
"""sync/async model loading, for internal use"""
if not self.ready:
if mode == "async":
t = threading.Thread(target=self._load_and_update_state)
t.start()
self.context.logger.info(f"started async model loading for {self.name}")
else:
self._load_and_update_state()
server = getattr(self.context, "_server", None) or getattr(
self.context, "server", None
)
if not server:
logger.warn("GraphServer not initialized for VotingEnsemble instance")
return
_init_endpoint_record(server, self)
def get_param(self, key: str, default=None):
"""get param by key (specified in the model or the function)"""
if key in self._params:
return self._params.get(key)
return self.context.get_param(key, default=default)
def set_metric(self, name: str, value):
"""set real time metric (for model monitoring)"""
self.metrics[name] = value
def get_model(self, suffix=""):
"""get the model file(s) and metadata from model store
the method returns a path to the model file and the extra data (dict of dataitem objects)
it also loads the model metadata into the self.model_spec attribute, allowing direct access
to all the model metadata attributes.
get_model is usually used in the model .load() method to init the model
Examples
--------
::
def load(self):
model_file, extra_data = self.get_model(suffix='.pkl')
self.model = load(open(model_file, "rb"))
categories = extra_data['categories'].as_df()
Parameters
----------
suffix : str
optional, model file suffix (when the model_path is a directory)
Returns
-------
str
(local) model file
dict
extra dataitems dictionary
"""
model_file, self.model_spec, extra_dataitems = mlrun.artifacts.get_model(
self.model_path, suffix
)
if self.model_spec and self.model_spec.parameters:
for key, value in self.model_spec.parameters.items():
self._params[key] = value
return model_file, extra_dataitems
def load(self):
"""model loading function, see also .get_model() method"""
if not self.ready and not self.model:
raise ValueError("please specify a load method or a model object")
def _check_readiness(self, event):
if self.ready:
return
if not event.trigger or event.trigger == "http":
raise RuntimeError(f"model {self.name} is not ready yet")
self.context.logger.info(f"waiting for model {self.name} to load")
for i in range(50): # wait up to 250 seconds (50 polls * 5 seconds)
time.sleep(5)
if self.ready:
return
raise RuntimeError(f"model {self.name} is not ready {self.error}")
def _pre_event_processing_actions(self, event, op):
self._check_readiness(event)
request = self.preprocess(event.body, op)
if "id" not in request:
request["id"] = event.id
return self.validate(request, op)
def do_event(self, event, *args, **kwargs):
"""main model event handler method"""
start = now_date()
op = event.path.strip("/")
if op == "predict" or op == "infer":
# predict operation
request = self._pre_event_processing_actions(event, op)
try:
outputs = self.predict(request)
except Exception as exc:
if self._model_logger:
self._model_logger.push(start, request, op=op, error=exc)
raise exc
response = {
"id": request["id"],
"model_name": self.name,
"outputs": outputs,
}
if self.version:
response["model_version"] = self.version
elif op == "ready" and event.method == "GET":
# get model health operation
setattr(event, "terminated", True)
if self.ready:
event.body = self.context.Response()
else:
event.body = self.context.Response(
status_code=408, body=b"model not ready"
)
return event
elif op == "" and event.method == "GET":
# get model metadata operation
setattr(event, "terminated", True)
event.body = {
"name": self.name,
"version": self.version,
"inputs": [],
"outputs": [],
}
if self.model_spec:
event.body["inputs"] = self.model_spec.inputs
event.body["outputs"] = self.model_spec.outputs
return event
elif op == "explain":
# explain operation
request = self._pre_event_processing_actions(event, op)
try:
outputs = self.explain(request)
except Exception as exc:
if self._model_logger:
self._model_logger.push(start, request, op=op, error=exc)
raise exc
response = {
"id": request["id"],
"model_name": self.name,
"outputs": outputs,
}
if self.version:
response["model_version"] = self.version
elif hasattr(self, "op_" + op):
# custom operation (child methods starting with "op_")
response = getattr(self, "op_" + op)(event)
event.body = response
return event
else:
raise ValueError(f"illegal model operation {op}, method={event.method}")
response = self.postprocess(response)
if self._model_logger:
self._model_logger.push(start, request, response, op)
event.body = response
return event
def validate(self, request, operation):
"""validate the event body (after preprocess)"""
if self.protocol == "v2":
if "inputs" not in request:
raise Exception('Expected key "inputs" in request body')
if not isinstance(request["inputs"], list):
raise Exception('Expected "inputs" to be a list')
return request
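# For reference (illustrative values), a minimal request body that passes the v2
# validation above looks like: {"id": "1", "inputs": [[5.1, 3.5, 1.4, 0.2]]}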
def preprocess(self, request: Dict, operation) -> Dict:
"""preprocess the event body before validate and action"""
return request
def postprocess(self, request: Dict) -> Dict:
"""postprocess, before returning response"""
return request
def predict(self, request: Dict) -> Dict:
"""model prediction operation"""
raise NotImplementedError()
def explain(self, request: Dict) -> Dict:
"""model explain operation"""
raise NotImplementedError()
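# Hedged sketch (not part of the original module) of a subclass adding a custom
# endpoint as described in the class docstring; all names below are illustrative:
#
#   class MyModel(V2ModelServer):
#       def load(self):
#           model_file, _extra = self.get_model(suffix=".pkl")
#           self.model = pickle.load(open(model_file, "rb"))
#
#       def predict(self, request):
#           return [len(x) for x in request["inputs"]]
#
#       def op_health_details(self, event):
#           # served at <model-url>/health_details
#           return {"name": self.name, "ready": self.ready}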
class _ModelLogPusher:
def __init__(self, model, context, output_stream=None):
self.model = model
self.verbose = context.verbose
self.hostname = context.stream.hostname
self.function_uri = context.stream.function_uri
self.stream_path = context.stream.stream_uri
self.stream_batch = int(context.get_param("log_stream_batch", 1))
self.stream_sample = int(context.get_param("log_stream_sample", 1))
self.output_stream = output_stream or context.stream.output_stream
self._worker = context.worker_id
self._sample_iter = 0
self._batch_iter = 0
self._batch = []
def base_data(self):
base_data = {
"class": self.model.__class__.__name__,
"worker": self._worker,
"model": self.model.name,
"version": self.model.version,
"host": self.hostname,
"function_uri": self.function_uri,
}
if getattr(self.model, "labels", None):
base_data["labels"] = self.model.labels
return base_data
def push(self, start, request, resp=None, op=None, error=None):
if error:
data = self.base_data()
data["request"] = request
data["op"] = op
data["when"] = str(start)
message = str(error)
if self.verbose:
message = f"{message}\n{traceback.format_exc()}"
data["error"] = message
self.output_stream.push([data])
return
self._sample_iter = (self._sample_iter + 1) % self.stream_sample
if self.output_stream and self._sample_iter == 0:
microsec = (now_date() - start).microseconds
if self.stream_batch > 1:
if self._batch_iter == 0:
self._batch = []
self._batch.append(
[request, op, resp, str(start), microsec, self.model.metrics]
)
self._batch_iter = (self._batch_iter + 1) % self.stream_batch
if self._batch_iter == 0:
data = self.base_data()
data["headers"] = [
"request",
"op",
"resp",
"when",
"microsec",
"metrics",
]
data["values"] = self._batch
self.output_stream.push([data])
else:
data = self.base_data()
data["request"] = request
data["op"] = op
data["resp"] = resp
data["when"] = str(start)
data["microsec"] = microsec
if getattr(self.model, "metrics", None):
data["metrics"] = self.model.metrics
self.output_stream.push([data])
def _init_endpoint_record(graph_server, model: V2ModelServer):
logger.info("Initializing endpoint records")
try:
project, uri, tag, hash_key = parse_versioned_object_uri(
graph_server.function_uri
)
if model.version:
versioned_model_name = f"{model.name}:{model.version}"
else:
versioned_model_name = f"{model.name}:latest"
model_endpoint = ModelEndpoint(
metadata=ModelEndpointMetadata(project=project, labels=model.labels),
spec=ModelEndpointSpec(
function_uri=graph_server.function_uri,
model=versioned_model_name,
model_class=model.__class__.__name__,
model_uri=model.model_path,
stream_path=config.model_endpoint_monitoring.store_prefixes.default.format(
project=project, kind="stream"
),
active=True,
),
status=ModelEndpointStatus(),
)
db = mlrun.get_run_db()
db.create_or_patch_model_endpoint(
project=project,
endpoint_id=model_endpoint.metadata.uid,
model_endpoint=model_endpoint,
)
except Exception as e:
logger.error("Failed to create endpoint record", exc=e)
|
tanenbaum.py
|
# Tanenbaum's solution
from threading import Semaphore, Thread
import time
from typing import Callable
def solution__tanenbaum():
PHILOSOPHERS = 5
state = ['thinking'] * PHILOSOPHERS
sem = [Semaphore(0) for _ in range(PHILOSOPHERS)]
mutex = Semaphore(1)
def philosopher(i: int, stop: Callable):
print(f'starting philosopher {i}')
time.sleep(1)
def log(msg):
loc = '\t\t' * i
print(f'{loc}[P{i}]{msg}')
def think():
log('think')
def eat():
log('eat')
def left(i): return (i + (PHILOSOPHERS - 1)) % PHILOSOPHERS
def right(i): return (i + 1) % PHILOSOPHERS
def get_fork(i):
mutex.acquire()
state[i] = 'hungry'
test(i)
mutex.release()
sem[i].acquire()
def put_fork(i):
mutex.acquire()
state[i] = 'thinking'
test(right(i))
test(left(i))
mutex.release()
def test(i):
if state[i] == 'hungry' and state[left(i)] != 'eating' and state[right(i)] != 'eating':
state[i] = 'eating'
sem[i].release()
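# Descriptive note (added): test(i) encodes the core invariant of Tanenbaum's
# solution: philosopher i may switch to 'eating' only while hungry and while
# neither neighbour is eating; releasing sem[i] then unblocks the acquire in
# get_fork. put_fork re-tests both neighbours, so a hungry neighbour blocked in
# get_fork is woken exactly when its forks become free.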
while True:
think()
get_fork(i)
eat()
put_fork(i)
if stop():
log('stopping')
break
stop_threads = False
workers = []
for i in range(PHILOSOPHERS):
thr = Thread(daemon=True, target=philosopher, args=(i, lambda: stop_threads))
workers.append(thr)
thr.start()
input()
stop_threads = True
for thr in workers:
    thr.join()
exit(0)
|
create_process_get_result__communication__Queue.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from multiprocessing import Process, Queue
def f(q, name):
q.put('Hello, ' + name)
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q, 'bob'))
p.start()
print(q.get()) # Hello, bob
p.join()
|
bucketloader.py
|
# This script connects to the node in the given ini file, creates "n" buckets,
# and loads data until the program is terminated.
# Example: python bucketloader.py -i node.ini -p count=6,delete=True,load=True,bucket_quota=219,prefix=cloud,threads=2
# With these parameters it deletes all existing buckets, creates 6 buckets of 219 MB each,
# and then starts running load on each bucket; each load runner uses 2 threads.
import sys
import uuid
sys.path.append('.')
sys.path.append('lib')
from threading import Thread
import TestInput
from membase.helper.bucket_helper import BucketOperationHelper
from memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
def create_buckets(server, count, prefix, quota):
rest = RestConnection(server)
buckets = rest.get_buckets()
if len(buckets) < count:
delta = count - len(buckets)
info = rest.get_nodes_self()
replica = 1
#TODO: calculate the bucket_ram from available_ram // delta
#verify that the given quota makes sense
#how much ram is used by all buckets
for i in range(0, delta):
hash_string = str(uuid.uuid4())
name = "{0}-{1}-{2}".format(prefix, i, hash_string[:4])
rest.create_bucket(bucket=name,
ramQuotaMB=quota,
replicaNumber=replica,
proxyPort=info.memcached)
print("created bucket {0}".format(name))
def load_buckets(server, name, get, threads, moxi):
distro = {500: 0.5, 1024: 0.5}
MemcachedClientHelper.load_bucket([server], name, -1, 10000000, distro, threads, -1, get, moxi)
if __name__ == "__main__":
try:
input = TestInput.TestInputParser.get_test_input(sys.argv)
server = input.servers[0]
params = input.test_params
count = 0
get = True
moxi = False
bucket_quota = 200
run_load = False
delete = False
prefix = "membase-"
no_threads = 6
if "count" in params:
count = int(params["count"])
print(count)
if "load" in params:
if params["load"].lower() == "true":
run_load = True
if "threads" in params:
no_threads = params["threads"]
if "bucket_quota" in params:
bucket_quota = params["bucket_quota"]
if "moxi" in params:
if params["moxi"].lower() == "true":
moxi = True
if "get" in params:
if params["get"].lower() == "true":
get = True
if "prefix" in params:
prefix = params["prefix"]
if "delete" in params:
if params["delete"].lower() == "true":
delete = True
if delete:
BucketOperationHelper.delete_all_buckets_or_assert([server], None)
create_buckets(server, count, prefix, bucket_quota)
if run_load:
rest = RestConnection(server)
buckets = rest.get_buckets()
threads = []
for bucket in buckets:
t = Thread(target=load_buckets, args=(server, bucket.name, get, no_threads, moxi))
t.start()
threads.append(t)
for t in threads:
t.join()
except Exception as ex:
print(ex)
|
server.py
|
'''
Run this file before playing in Arena mode.
This code has been sourced in its entirety from the provided module. Only a few
small changes have been made to adjust to the variables I was using.
https://kdchin.gitbooks.io/sockets-module-manual/content/
'''
####################################
# Modules
####################################
import socket
import threading
from queue import Queue
####################################
# Global Variables
####################################
# HOST = "" # IP Address
BACKLOG = 4
from port import HOST, PORT
####################################
# Server
####################################
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST,PORT))
server.listen(BACKLOG)
print("Running on port %d..." % PORT)
####################################
# Server Functions
####################################
def handleClient(client, serverChannel, cID, clientele):
client.setblocking(1)
msg = ""
while True:
try:
msg += client.recv(10).decode("UTF-8")
command = msg.split("\n")
while (len(command) > 1):
readyMsg = command[0]
msg = "\n".join(command[1:])
serverChannel.put(str(cID) + " " + readyMsg)
command = msg.split("\n")
except:
# receiving or decoding failed (client likely disconnected); stop handling this client
return
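# Descriptive note (added): the wire protocol is newline-delimited text. handleClient
# buffers partial reads until at least one full "\n"-terminated command is available,
# then forwards each complete command to serverChannel prefixed with the sender's ID.
# serverThread below relays "<instruction> <senderID> <details>\n" to every other client.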
def serverThread(clientele, serverChannel):
while True:
msg = serverChannel.get(True, None)
print("Message Received: ", msg)
msgList = msg.split(" ")
senderID = msgList[0]
instruction = msgList[1]
details = " ".join(msgList[2:])
if (details != ""):
for cID in clientele:
if cID != senderID:
sendMsg = instruction + " " + senderID + " " + details + "\n"
clientele[cID].send(sendMsg.encode())
print("> Sent to %s:" % cID, sendMsg[:-1])
print()
serverChannel.task_done()
####################################
# Add clients
####################################
clientele = dict()
playerNum = 0
serverChannel = Queue(100)
threading.Thread(target = serverThread, args = (clientele, serverChannel)).start()
names = ["Player0", "Player1"]
while playerNum < 2:
client, address = server.accept()
# myID is the key to the client in the clientele dictionary
myID = names[playerNum]
print(myID, playerNum)
for cID in clientele:
print (repr(cID), repr(playerNum))
clientele[cID].send(("newPlayer %s\n" % myID).encode())
client.send(("newPlayer %s\n" % cID).encode())
clientele[myID] = client
client.send(("myIDis %s \n" % myID).encode())
print("Connection recieved from %s" % myID)
threading.Thread(target = handleClient, args =
(client ,serverChannel, myID, clientele)).start()
playerNum += 1
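# Illustrative sketch (not part of the original file): a minimal client for the
# protocol above, reusing HOST/PORT from port.py. Messages are newline-terminated
# plain text; the server relays them to the other player as
# "<instruction> <senderID> <details>\n". Never invoked here - for reference only.
def _example_client():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    # the server first announces this client's ID, e.g. "myIDis Player0 \n"
    greeting = sock.recv(64).decode("UTF-8")
    print("server said:", greeting.strip())
    # send an instruction with details; it is relayed to the other connected player
    sock.send("chat hello-from-example\n".encode())
    sock.close()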
|
opencv.py
|
import logging
from collections import deque
from contextlib import contextmanager
from itertools import cycle
from time import monotonic, sleep
from threading import Event, Thread
from typing import (
Callable, ContextManager, Deque, Iterable, Iterator,
Generic,
Optional,
TypeVar
)
from cv2 import cv2
import numpy as np
from .io import get_file
from .image import ImageArray, ImageSize, rgb_to_bgr, bgr_to_rgb, get_image_size
LOGGER = logging.getLogger(__name__)
DEFAULT_WEBCAM_FOURCC = 'MJPG'
T = TypeVar('T')
# Note: extending cv2.VideoCapture seems to cause core dumps
class VideoCaptureWrapper:
def __init__(self, *args, **kwargs):
self._released = False
self._reading_count = 0
self._not_reading_event = Event()
self.video_capture = cv2.VideoCapture(*args, **kwargs)
def release(self):
if self._released:
            LOGGER.warning('attempting to release already released video capture')
return
self._released = True
if self._reading_count > 0:
LOGGER.warning(
'releasing video capture while reading is in progress (%d)',
self._reading_count
)
self._not_reading_event.wait(10)
LOGGER.info('releasing video capture')
self.video_capture.release()
def read(self):
if self._released:
            LOGGER.warning('attempting to read already released video capture')
return False, None
if not self.video_capture.isOpened():
LOGGER.warning('attempting to read closed video capture')
return False, None
try:
self._reading_count += 1
self._not_reading_event.clear()
return self.video_capture.read()
finally:
self._reading_count -= 1
if self._reading_count == 0:
self._not_reading_event.set()
def get(self, propId):
if self._released:
            LOGGER.warning('attempting to get property of released video capture')
return 0
return self.video_capture.get(propId)
def set(self, propId, value):
if self._released:
            LOGGER.warning('attempting to set property of released video capture')
return
self.video_capture.set(propId, value)
class WaitingDeque(Generic[T]):
def __init__(self, max_length: int):
self.deque: Deque[T] = deque(maxlen=max_length)
self.changed_event = Event()
def append(self, data: T):
self.deque.append(data)
self.changed_event.set()
def peek(self, default_value: T = None) -> Optional[T]:
try:
return self.deque[-1]
except IndexError:
return default_value
def pop(self, timeout: float = None) -> T:
self.changed_event.clear()
try:
return self.deque.pop()
except IndexError:
pass
self.changed_event.wait(timeout=timeout)
return self.deque.pop()
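# Illustrative sketch (not part of the original module): WaitingDeque keeps only the
# most recent item(s). peek() returns the newest value without removing it, while
# pop() blocks on the internal Event until data has been appended. Never invoked here.
def _example_waiting_deque():
    latest: WaitingDeque[int] = WaitingDeque(max_length=1)
    latest.append(1)
    latest.append(2)           # with max_length=1 this displaces the previous item
    assert latest.peek() == 2  # newest value, still available afterwards
    assert latest.pop() == 2   # removes it; a further pop() would wait for new data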
class ReadLatestThreadedReader(Generic[T]):
def __init__(
self,
iterable: Iterable[T],
stopped_event: Optional[Event] = None,
wait_for_data: bool = False
):
self.iterable = iterable
self.thread = Thread(target=self.read_all_loop, daemon=False)
self.data_deque = WaitingDeque[T](max_length=1)
if stopped_event is None:
stopped_event = Event()
self.stopped_event = stopped_event
self.wait_for_data = wait_for_data
def __enter__(self):
LOGGER.debug('starting reader thread')
self.start()
return self
def __exit__(self, *_, **__):
self.stop()
return False
def __iter__(self):
return self
def __next__(self):
if self.wait_for_data:
return self.pop()
data = self.peek()
if data is None:
raise StopIteration()
return data
def start(self):
self.thread.start()
def stop(self):
self.stopped_event.set()
def peek(self) -> Optional[T]:
while True:
data = self.data_deque.peek()
if data is not None:
return data
if self.stopped_event.is_set():
return None
# wait for first frame (subsequent frames will always be available)
sleep(0.01)
def pop(self, timeout: float = None) -> T:
LOGGER.debug('waiting for data..')
return self.data_deque.pop(timeout=timeout)
def read_all_loop(self):
while not self.stopped_event.is_set():
try:
self.data_deque.append(next(self.iterable))
LOGGER.debug('read data')
except StopIteration:
LOGGER.info('read thread stopped, due to exhausted iterable')
self.stopped_event.set()
return
LOGGER.info('reader thread stopped, due to event')
self.stopped_event.set()
def iter_read_threaded(iterable: Iterable[T], **kwargs) -> Iterable[T]:
with ReadLatestThreadedReader[T](iterable, **kwargs) as reader:
yield from reader
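# Illustrative sketch (not part of the original module): wrapping a slow producer so
# that the consumer always sees the most recent value rather than a growing backlog.
# The generator below merely simulates e.g. a camera stream. Never invoked here.
def _example_iter_read_threaded():
    def slow_producer():
        for value in range(100):
            sleep(0.05)  # simulate a new frame arriving every 50ms
            yield value
    for value in iter_read_threaded(slow_producer(), wait_for_data=True):
        # with wait_for_data=True each iteration blocks until a fresh value exists;
        # intermediate values may be skipped if this loop is slower than the producer
        LOGGER.info('latest value: %s', value)
        if value >= 5:
            break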
def iter_read_raw_video_images(
video_capture: cv2.VideoCapture,
repeat: bool = False,
is_stopped: Callable[[], bool] = None
) -> Iterable[ImageArray]:
while is_stopped is None or not is_stopped():
grabbed, image_array = video_capture.read()
if not grabbed:
LOGGER.info('video end reached')
if not repeat:
return
video_capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
grabbed, image_array = video_capture.read()
if not grabbed:
LOGGER.info('unable to rewind video')
return
yield image_array
def iter_resize_video_images(
video_images: Iterable[ImageArray],
image_size: ImageSize = None,
interpolation: int = cv2.INTER_LINEAR
) -> Iterable[ImageArray]:
is_first = True
for image_array in video_images:
LOGGER.debug('video image_array.shape: %s', image_array.shape)
if is_first:
LOGGER.info(
'received video image shape: %s (requested: %s)',
image_array.shape, image_size
)
is_first = False
if image_size and get_image_size(image_array) != image_size:
image_array = cv2.resize(
image_array,
(image_size.width, image_size.height),
interpolation=interpolation
)
yield image_array
def iter_convert_video_images_to_rgb(
video_images: Iterable[ImageArray]
) -> Iterable[ImageArray]:
return (bgr_to_rgb(image_array) for image_array in video_images)
def iter_delay_video_images_to_fps(
video_images: Iterable[ImageArray],
fps: float = None
) -> Iterable[np.ndarray]:
if not fps or fps <= 0:
yield from video_images
return
desired_frame_time = 1 / fps
last_frame_time = None
frame_times: Deque[float] = deque(maxlen=10)
current_fps = 0.0
additional_frame_adjustment = 0.0
end_frame_time = monotonic()
video_images_iterator = iter(video_images)
while True:
start_frame_time = end_frame_time
# attempt to retrieve the next frame (that may vary in time)
try:
image_array = np.copy(next(video_images_iterator))
except StopIteration:
return
# wait time until delivery in order to achieve a similar fps
current_time = monotonic()
if last_frame_time:
desired_wait_time = (
desired_frame_time
- (current_time - last_frame_time)
+ additional_frame_adjustment
)
if desired_wait_time > 0:
LOGGER.debug(
'sleeping for desired fps: %s (desired_frame_time: %s, fps: %.3f)',
desired_wait_time, desired_frame_time, current_fps
)
sleep(desired_wait_time)
last_frame_time = monotonic()
# emit the frame (post processing may add to the overall)
yield image_array
end_frame_time = monotonic()
frame_time = end_frame_time - start_frame_time
additional_frame_adjustment = desired_frame_time - frame_time
frame_times.append(frame_time)
current_fps = 1 / (sum(frame_times) / len(frame_times))
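# Illustrative sketch (not part of the original module): pacing an in-memory list of
# frames to roughly 10 fps. desired_frame_time is 1/fps = 0.1s, and any timing error
# is carried over via additional_frame_adjustment so the average rate converges on
# the requested fps. Never invoked here.
def _example_delay_to_fps():
    frames = [np.zeros((4, 4, 3), dtype=np.uint8)] * 5
    start = monotonic()
    for _ in iter_delay_video_images_to_fps(frames, fps=10):
        pass
    # 5 frames at 10 fps should take roughly 0.4-0.5 seconds in total
    LOGGER.info('elapsed: %.2fs', monotonic() - start)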
def iter_read_video_images(
video_capture: cv2.VideoCapture,
image_size: ImageSize = None,
interpolation: int = cv2.INTER_LINEAR,
repeat: bool = False,
preload: bool = False,
fps: float = None,
threading_enabled: bool = True,
stopped_event: Event = None
) -> Iterable[np.ndarray]:
video_images: Iterable[np.ndarray]
if preload:
LOGGER.info('preloading video images')
preloaded_video_images = list(
iter_convert_video_images_to_rgb(iter_resize_video_images(
iter_read_raw_video_images(video_capture, repeat=False),
image_size=image_size, interpolation=interpolation
))
)
        if repeat:
            video_images = cycle(preloaded_video_images)
        else:
            video_images = preloaded_video_images
        return iter_delay_video_images_to_fps(video_images, fps)
video_images = iter_read_raw_video_images(video_capture, repeat=repeat)
video_images = iter_delay_video_images_to_fps(video_images, fps)
if threading_enabled:
video_images = iter_read_threaded(
video_images, stopped_event=stopped_event
)
video_images = iter_resize_video_images(
video_images, image_size=image_size, interpolation=interpolation
)
video_images = iter_convert_video_images_to_rgb(video_images)
return video_images
@contextmanager
def get_video_image_source( # pylint: disable=too-many-locals
path: str,
image_size: ImageSize = None,
repeat: bool = False,
preload: bool = False,
download: bool = True,
fps: float = None,
fourcc: str = None,
buffer_size: int = None,
threading_enabled: bool = True,
stopped_event: Event = None,
**_
) -> Iterator[Iterable[ImageArray]]:
local_path = get_file(path, download=download)
if local_path != path:
LOGGER.info('loading video: %r (downloaded from %r)', local_path, path)
else:
LOGGER.info('loading video: %r', path)
video_capture = VideoCaptureWrapper(local_path)
if fourcc:
LOGGER.info('setting video fourcc to %r', fourcc)
video_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))
if buffer_size:
video_capture.set(cv2.CAP_PROP_BUFFERSIZE, buffer_size)
if image_size:
LOGGER.info('attempting to set video image size to: %s', image_size)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, image_size.width)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, image_size.height)
if fps:
LOGGER.info('attempting to set video fps to %r', fps)
video_capture.set(cv2.CAP_PROP_FPS, fps)
actual_image_size = ImageSize(
width=video_capture.get(cv2.CAP_PROP_FRAME_WIDTH),
height=video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
)
actual_fps = video_capture.get(cv2.CAP_PROP_FPS)
frame_count = video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
LOGGER.info(
'video reported image size: %s (%s fps, %s frames)',
actual_image_size, actual_fps, frame_count
)
if preload and frame_count <= 0:
LOGGER.info('disabling preload for video source with unknown frame count')
preload = False
try:
LOGGER.debug('threading_enabled: %s', threading_enabled)
yield iter_read_video_images(
video_capture,
image_size=image_size,
repeat=repeat,
preload=preload,
fps=fps if fps is not None else actual_fps,
threading_enabled=threading_enabled,
stopped_event=stopped_event
)
finally:
LOGGER.debug('releasing video capture: %s', path)
video_capture.release()
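# Illustrative sketch (not part of the original module): typical use of the context
# manager above. 'sample.mp4' is a hypothetical local file; frames are yielded as
# RGB numpy arrays, resized to the requested ImageSize. Never invoked here.
def _example_video_image_source():
    with get_video_image_source(
        'sample.mp4',
        image_size=ImageSize(width=320, height=240),
        threading_enabled=False,
        download=False
    ) as video_images:
        for image_array in video_images:
            LOGGER.info('frame shape: %s', image_array.shape)
            break  # just demonstrate the first frame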
def get_webcam_image_source(
path: str,
fourcc: str = None,
buffer_size: int = 1,
**kwargs
) -> ContextManager[Iterable[ImageArray]]:
if fourcc is None:
fourcc = DEFAULT_WEBCAM_FOURCC
return get_video_image_source(path, fourcc=fourcc, buffer_size=buffer_size, **kwargs)
class ShowImageSink:
def __init__(self, window_name: str):
self.window_name = window_name
def __enter__(self):
cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.window_name, 600, 600)
return self
def __exit__(self, *_, **__):
cv2.destroyAllWindows()
def __call__(self, image_array: ImageArray):
if cv2.getWindowProperty(self.window_name, cv2.WND_PROP_VISIBLE) <= 0:
LOGGER.info('window closed')
raise KeyboardInterrupt('window closed')
image_array = np.asarray(image_array).astype(np.uint8)
cv2.imshow(self.window_name, rgb_to_bgr(image_array))
cv2.waitKey(1)
|
tutorial_wms.py
|
"""
mss.tutorials.tutorial_wms
~~~~~~~~~~~~~~~~~~~~~~~~~~
This python script generates an automatic demonstration of how to use the web map service section of Mission
Support System and plan flighttracks accordingly.
This file is part of mss.
:copyright: Copyright 2021 Hrithik Kumar Verma
:copyright: Copyright 2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyautogui as pag
import multiprocessing
import sys
from sys import platform
from pyscreeze import ImageNotFoundException
from tutorials import screenrecorder as sr
from mslib.msui import mss_pyui
def initial_ops():
"""
Executes the initial operations such as closing all opened windows and showing the desktop.
"""
pag.sleep(5)
if platform == "linux" or platform == "linux2":
pag.hotkey('winleft', 'd')
print("\n INFO : Automation is running on Linux system..\n")
elif platform == "darwin":
pag.hotkey('option', 'command', 'm')
print("\n INFO : Automation is running on Mac OS..\n")
elif platform == "win32":
pag.hotkey('win', 'd')
print("\n INFO : Automation is running on Windows OS..\n")
else:
pag.alert(text="Sorry, no support on this platform!", title="Platform Exception", button='OK')
def call_recorder():
"""
Calls the screen recorder class to start the recording of the automation.
"""
sr.main()
def call_mss():
"""
Calls the main MSS GUI window since operations are to be performed on it only.
"""
mss_pyui.main()
def automate_waypoints():
"""
This is the main automating script of the MSS web map service tutorial which will be recorded and saved
    to a file named using the current date and time, with a .mp4 extension (codec).
"""
# Giving time for loading of the MSS GUI.
pag.sleep(5)
if platform == 'linux' or platform == 'linux2' or platform == 'darwin':
dir_path = 'pictures/tutorial_wms/linux/'
elif platform == 'win32':
dir_path = 'pictures/tutorial_wms/win/'
# Maximizing the window
try:
if platform == 'linux' or platform == 'linux2':
pag.hotkey('winleft', 'up')
elif platform == 'darwin':
pag.hotkey('ctrl', 'command', 'f')
elif platform == 'win32':
pag.hotkey('win', 'up')
except Exception:
print("\nException : Enable Shortcuts for your system or try again!")
pag.sleep(2)
pag.hotkey('ctrl', 'h')
pag.sleep(1)
# Locating Server Layer
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}layers.png')
pag.click(x, y, interval=2)
if platform == 'win32':
pag.move(35, -485, duration=1)
pag.dragRel(-800, -60, duration=2)
elif platform == 'linux' or platform == 'linux2' or platform == 'darwin':
pag.move(35, -522, duration=1)
pag.dragRel(950, -30, duration=2)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Server\\Layers\' button/option not found on the screen.")
# Entering wms URL
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}wms_url.png')
pag.click(x + 220, y, interval=2)
pag.hotkey('ctrl', 'a', interval=1)
pag.write('http://open-mss.org/', interval=0.25)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'WMS URL\' editbox button/option not found on the screen.")
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}get_capabilities.png')
pag.click(x, y, interval=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Get capabilities\' button/option not found on the screen.")
# Selecting some layers
if platform == 'win32':
gap = 22
elif platform == 'linux' or platform == 'linux2' or platform == 'darwin':
gap = 18
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}divergence_layer.png')
temp1, temp2 = x, y
pag.click(x, y, interval=2)
pag.sleep(1)
pag.move(None, gap, duration=1)
pag.click(interval=1)
pag.sleep(1)
pag.move(None, gap * 2, duration=1)
pag.click(interval=1)
pag.sleep(1)
pag.move(None, gap, duration=1)
pag.click(interval=1)
pag.sleep(1)
pag.move(None, -gap * 4, duration=1)
pag.click(interval=1)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Divergence Layer\' option not found on the screen.")
# Filter layer
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}layer_filter.png')
pag.click(x + 150, y, interval=2)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Layer filter editbox\' button/option not found on the screen.")
if x is not None and y is not None:
pag.write('temperature', interval=0.25)
pag.moveTo(temp1, temp2, duration=1)
pag.click(interval=2)
pag.sleep(1)
pag.move(None, gap, duration=1)
pag.click(interval=2)
pag.sleep(1)
# Clearing filter
pag.moveTo(x + 150, y, duration=1)
pag.click(interval=1)
if platform == 'linux' or platform == 'linux2' or platform == 'win32':
pag.press('backspace', presses=11, interval=0.25)
elif platform == 'darwin':
pag.press('delete', presses=11, interval=0.25)
pag.sleep(1)
# Multilayering
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}multilayering.png')
pag.moveTo(x, y, duration=2)
pag.move(-48, None)
pag.click()
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Multilayering Checkbox\' button/option not found on the screen.")
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}checkbox_unselected_divergence.png')
if platform == 'win32':
pag.moveTo(x - 268, y, duration=2)
elif platform == 'linux' or platform == 'linux2' or platform == 'darwin':
pag.moveTo(x - 228, y, duration=2)
pag.click(interval=1)
pag.sleep(2)
pag.move(None, gap * 4, duration=1)
pag.click(interval=1)
pag.sleep(2)
pag.move(None, -gap * 4, duration=1)
pag.click(interval=1)
pag.sleep(2)
pag.move(None, gap * 4, duration=1)
pag.click(interval=1)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Divergence layer multilayering checkbox\' option not found on the screen.")
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}multilayering.png')
pag.moveTo(x, y, duration=2)
pag.move(-48, None)
pag.click()
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Multilayering Checkbox\' button/option not found on the screen.")
# Starring the layers
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}unselected_divergence_layer.png')
if platform == 'win32':
pag.moveTo(x - 255, y, duration=2)
elif platform == 'linux' or platform == 'linux2' or platform == 'darwin':
pag.moveTo(x - 231, y, duration=2)
pag.click(interval=1)
pag.sleep(1)
pag.move(None, gap * 4, duration=1)
pag.click(interval=1)
pag.sleep(1)
pag.move(None, -gap, duration=1)
pag.click(interval=1)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Divergence layer star\' button/option not found on the screen.")
# Filtering starred layers.
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}star_filter.png')
pag.click(x, y, interval=2)
pag.click(temp1, temp2, duration=1)
pag.sleep(1)
pag.move(None, gap, duration=1)
pag.click(interval=1)
pag.sleep(1)
pag.move(None, gap, duration=1)
pag.click(interval=1)
pag.sleep(1)
pag.moveTo(x - 20, y, duration=1)
pag.click(interval=1)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Starred filter\' button/option not found on the screen.")
# Setting different levels and valid time
if temp1 is not None and temp2 is not None:
pag.click(temp1, temp2 + (gap * 4), interval=2)
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}level.png')
pag.click(x + 200, y, interval=2)
pag.move(None, 20, duration=1)
pag.click(interval=1)
pag.sleep(3)
pag.click(x + 200, y, interval=1)
pag.move(None, 100, duration=1)
pag.click(interval=1)
pag.sleep(3)
pag.click(x + 200, y, interval=1)
pag.move(None, 140, duration=1)
pag.click(interval=1)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Pressure level\' button/option not found on the screen.")
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}initialization.png')
initx, inity = x, y
pag.click(x + 200, y, interval=1)
pag.sleep(1)
pag.click(x + 200, y, interval=1)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Initialization\' button/option not found on the screen.")
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}valid.png')
validx, validy = x, y
pag.click(x + 200, y, interval=2)
pag.move(None, 20, duration=1)
pag.click(interval=1)
pag.sleep(3)
pag.click(x + 200, y, interval=1)
pag.move(None, 80, duration=1)
pag.click(interval=1)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : \'Valid till\' button/option not found on the screen.")
# Time gap for initialization and valid
if initx is not None and inity is not None and validx is not None and validy is not None:
pag.click(initx + 818, inity, interval=2)
pag.press('up', presses=5, interval=0.25)
pag.press('down', presses=3, interval=0.25)
if platform == 'linux' or platform == 'linux2' or platform == 'win32':
pag.press('enter')
elif platform == 'darwin':
pag.press('return')
pag.click(validx + 833, validy, interval=2)
pag.press('up', presses=5, interval=0.20)
pag.press('down', presses=6, interval=0.20)
if platform == 'linux' or platform == 'linux2' or platform == 'win32':
pag.press('enter')
elif platform == 'darwin':
pag.press('return')
# Previous and Next of Initial(Initialization) values
pag.click(initx + 753, inity, clicks=2, interval=2)
pag.click(initx + 882, inity, clicks=2, interval=2)
# Previous and Next of Valid values
pag.click(validx + 760, validy, clicks=4, interval=4)
pag.click(validx + 887, validy, clicks=4, interval=4)
# Auto-update feature of wms
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}auto_update.png')
pag.click(x - 53, y, interval=2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\' auto update checkbox\' button/option not found on the screen.")
if temp1 is not None and temp2 is not None:
pag.click(temp1, temp2, interval=1)
try:
retx, rety = pag.locateCenterOnScreen(f'{dir_path}retrieve.png')
pag.click(retx, rety, interval=2)
pag.sleep(3)
pag.click(temp1, temp2 + (gap * 4), interval=2)
pag.click(retx, rety, interval=2)
pag.sleep(3)
pag.click(x - 53, y, interval=2)
pag.click(temp1, temp2, interval=2)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\' retrieve\' button/option not found on the screen.")
# Using and not using Cache
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}use_cache.png')
pag.click(x - 46, y, interval=2)
pag.click(temp1, temp2, interval=2)
pag.sleep(4)
pag.click(temp1, temp2 + (gap * 4), interval=2)
pag.sleep(4)
pag.click(x - 46, y, interval=2)
pag.click(temp1, temp2 + (gap * 2), interval=2)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Use Cache checkbox\' button/option not found on the screen.")
# Clearing cache. The layers load slower
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}clear_cache.png')
pag.click(x, y, interval=2)
if platform == 'linux' or platform == 'linux2' or platform == 'win32':
pag.press('enter', interval=1)
elif platform == 'darwin':
pag.press('return', interval=1)
pag.click(temp1, temp2, interval=2)
pag.sleep(4)
pag.click(temp1, temp2 + (gap * 4), interval=2)
pag.sleep(4)
pag.click(temp1, temp2 + (gap * 2), interval=2)
pag.sleep(4)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Clear cache\' button/option not found on the screen.")
    # Transparent layer
if temp1 is not None and temp2 is not None:
pag.click(temp1, temp2, interval=2)
pag.sleep(1)
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}transparent.png')
pag.click(x - 53, y, interval=2)
if retx is not None and rety is not None:
pag.click(retx, rety, interval=2)
pag.sleep(1)
pag.click(x - 53, y, interval=2)
pag.click(temp1, temp2, interval=2)
pag.click(retx, rety, interval=2)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Transparent Checkbox\' button/option not found on the screen.")
# Removing a Layer from the map
if temp1 is not None and temp2 is not None:
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}remove.png')
pag.click(x, y, interval=2)
pag.sleep(1)
pag.click(temp1, temp2 + (gap * 4), interval=2)
pag.click(x, y, interval=2)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Transparent Checkbox\' button/option not found on the screen.")
# Deleting All layers
try:
x, y = pag.locateCenterOnScreen(f'{dir_path}delete_layers.png')
if platform == 'win32':
pag.click(x - 74, y, interval=2)
elif platform == 'linux' or platform == 'linux2' or platform == 'darwin':
pag.click(x - 70, y, interval=2)
pag.sleep(1)
x1, y1 = pag.locateCenterOnScreen(f'{dir_path}get_capabilities.png')
pag.click(x1, y1, interval=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Deleting all layers bin\' button/option not found on the screen.")
print("\nAutomation is over for this tutorial. Watch next tutorial for other functions.")
# Close Everything!
try:
if platform == 'linux' or platform == 'linux2':
pag.hotkey('altleft', 'f4', interval=1)
for _ in range(2):
pag.hotkey('altleft', 'f4')
pag.sleep(1)
pag.press('left')
pag.sleep(1)
pag.press('enter')
pag.sleep(1)
pag.keyDown('altleft')
pag.press('tab')
pag.press('left')
pag.keyUp('altleft')
pag.press('q')
if platform == 'win32':
pag.hotkey('alt', 'f4', interval=1)
for _ in range(2):
pag.hotkey('alt', 'f4')
pag.sleep(1)
pag.press('left')
pag.sleep(1)
pag.press('enter')
pag.sleep(1)
pag.hotkey('alt', 'tab')
pag.press('q')
elif platform == 'darwin':
pag.hotkey('command', 'w', interval=1)
for _ in range(2):
pag.hotkey('command', 'w')
pag.sleep(1)
pag.press('left')
pag.sleep(1)
pag.press('return')
pag.sleep(1)
pag.hotkey('command', 'tab')
pag.press('q')
except Exception:
print("Cannot automate : Enable Shortcuts for your system or try again")
    # pag.press('q')  # In some cases, the recording window does not close, so this needs to be there.
def main():
"""
This function runs the above functions as different processes at the same time and can be
controlled from here. (This is the main process.)
"""
p1 = multiprocessing.Process(target=call_mss)
p2 = multiprocessing.Process(target=automate_waypoints)
p3 = multiprocessing.Process(target=call_recorder)
print("\nINFO : Starting Automation.....\n")
p3.start()
pag.sleep(5)
initial_ops()
p1.start()
p2.start()
p2.join()
p1.join()
p3.join()
print("\n\nINFO : Automation Completes Successfully!")
    # pag.press('q')  # In some cases, the recording window does not close, so this needs to be there.
sys.exit()
if __name__ == '__main__':
main()
|
vidIO.py
|
#!/usr/bin/env python3
import cv2
import numpy as np
from time import perf_counter, sleep
from queue import Queue
from threading import Thread, Event
from pcv.interact import DoNothing, waitKey
from pcv.source import VideoSource
class BlockingVideoWriter(cv2.VideoWriter):
''' A cv2.VideoWriter with a context manager for releasing.
Generally suggested to use the non-blocking, threaded VideoWriter class
instead, unless your application requires no wait time on completion
but permits performance reduction throughout to write frames. If that's
the case, try VideoWriter anyway, and come back to this if a notable
backlog occurs (it will tell you).
'''
properties = {
'quality' : cv2.VIDEOWRITER_PROP_QUALITY,
'framebytes' : cv2.VIDEOWRITER_PROP_FRAMEBYTES,
'nstripes' : cv2.VIDEOWRITER_PROP_NSTRIPES,
}
# functioning combinations are often hard to find - these hopefully work
SUGGESTED_CODECS = {
'avi' : ['H264','X264','XVID','MJPG'],
'mp4' : ['avc1','mp4v'],
'mov' : ['avc1','mp4v'],
'mkv' : ['H264'],
}
def __init__(self, filename, fourcc, fps, frameSize, isColor=True,
apiPreference=None):
''' Initialise a BlockingVideoWriter with the given parameters.
'filename' The video file to write to.
'fourcc' the "four character code" representing the writing codec.
Can be a four character string, or an int as returned by
cv2.VideoWriter_fourcc. As functioning combinations of file
extension + codec can be difficult to find, the helper method
VideoWriter.suggested_codec is provided, accepting a filename
(or file extension) and a list of previously tried codecs that
didn't work and should be excluded. Suggested codecs are populated
from VideoWriter.SUGGESTED_CODECS, if you wish to view the
suggested options directly.
'fps' is the framerate (frames per second) to save as. It is a constant
float, and can only be set on initialisation. To have a video that
plays back faster than the recording stream, set the framerate to
higher than the input framerate. The VideoWriter.from_camera
factory function is provided to create a video-writer directly
from a camera instance, and allows measuring of the input framerate
for accurate output results if desired.
        'frameSize' is the size of the input frames as a tuple of (width, height).
'isColor' is a boolean specifying if the saved images are coloured.
Defaults to True. Set False for greyscale input streams.
'apiPreference' allows specifying which API backend to use. It can be
used to enforce a specific reader implementation if multiple are
available (e.g. cv2.CAP_FFMPEG or cv2.CAP_GSTREAMER). Generally
this is not required, and if left as None it is ignored.
'''
self.filename = filename
self.fps = fps
self.is_color = isColor
self.frame_size = frameSize
self.api_preference = apiPreference
self.set_fourcc(fourcc)
super().__init__(*self._construct_open_args())
def set_fourcc(self, fourcc):
''' Set fourcc code as an integer or an iterable of 4 chars. '''
self.fourcc = fourcc # save for checking value
if not isinstance(fourcc, int):
# assume iterable of 4 chars
fourcc = cv2.VideoWriter_fourcc(*fourcc)
self._fourcc = fourcc
def _construct_open_args(self):
args = [self.filename, self._fourcc, self.fps, self.frame_size,
self.is_color]
if self.api_preference is not None:
args = [args[0], self.api_preference, *args[1:]]
return args
def __enter__(self):
''' Re-entrant '''
if not self.isOpened():
self.open(*self._construct_open_args())
return self
def __exit__(self, *args):
self.release()
def get(self, property):
''' Returns 'property' value, or 0 if not supported by the backend.
'property' can be a string key for the VideoWriter.properties
dictionary or an integer from cv2.VIDEOWRITER_PROP_*
self.get(str/int) -> float
'''
try:
return super().get(self.properties[property.lower()])
except AttributeError:
return super().get(property)
def set(self, property, value):
''' Attempts to set the specified property value.
Returns True if the property is supported by the backend in use.
'property' can be a string key for the VideoWriter.properties
dictionary or an integer from cv2.VIDEOWRITER_PROP_*
'value' should be a float
self.set(str/int, float) -> bool
'''
try:
return super().set(self.properties[property.lower()], value)
except AttributeError:
return super().set(property, value)
@classmethod
def suggested_codec(cls, filename, exclude=[]):
extension = filename.split('.')[-1]
try:
return [codec for codec in cls.SUGGESTED_CODECS[extension.lower()]
if codec not in exclude][0]
except IndexError:
raise Exception('No codecs available, try a different extension')
@classmethod
def from_camera(cls, filename, camera, fourcc=None, isColor=True,
apiPreference=None, fps=-3, frameSize=None, **kwargs):
''' Returns a VideoWriter based on the properties of the input camera.
'filename' is the name of the file to save to.
'camera' is the SlowCamera instance (or any of its subclasses).
'fourcc' is the codec four-character code. If left as None is
determined automatically from filename.
'isColor' specifies if the video stream is colour or greyscale.
'fps' can be set as a float, 'camera' to ask the camera for the value,
or a negative integer to measure over that number of frames.
If no processing is occurring, 'camera' is suggested, otherwise
it is generally best to measure the frame output.
Defaults to -3, to measure over 3 frames.
'frameSize' is an integer tuple of (width, height)/(cols, rows).
If left as None, uses `camera.get` to retrieve width and height.
'kwargs' are any additional keyword arguments for initialisation.
'''
if fourcc is None:
fourcc = cls.suggested_codec(filename)
if frameSize is None:
frameSize = tuple(int(camera.get(dim))
for dim in ('width', 'height'))
if fps == 'camera':
fps = camera.get('fps')
elif fps < 0:
fps = camera.measure_framerate(-fps)
return cls(filename, fourcc, fps, frameSize, isColor, apiPreference,
**kwargs)
def __repr__(self):
return (f'{self.__class__.__name__}(filename={repr(self.filename)}, '
f'fourcc={repr(self.fourcc)}, fps={self.fps}, '
                f'frameSize={self.frame_size}, isColor={self.is_color}, '
f'apiPreference={self.api_preference})')
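# Illustrative sketch (not part of the original module): writing a few synthetic
# frames with the blocking writer. The codec is picked via suggested_codec and the
# context manager releases the writer on exit. 'out.avi' is a hypothetical path.
def _example_blocking_writer():
    frame_size = (640, 480)  # (width, height), as expected by cv2.VideoWriter
    fourcc = BlockingVideoWriter.suggested_codec('out.avi')
    with BlockingVideoWriter('out.avi', fourcc, fps=30, frameSize=frame_size) as writer:
        for _ in range(30):
            # frames are (rows, cols, channels) BGR uint8 arrays
            writer.write(np.zeros((frame_size[1], frame_size[0], 3), dtype=np.uint8))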
class VideoWriter(BlockingVideoWriter):
''' A non-blocking thread-based video writer, using a queue. '''
def __init__(self, *args, maxsize=0, verbose_exit=True, **kwargs):
''' Initialise the video writer.
'maxsize' is the maximum allowed frame buildup before adding frames
blocks execution. Defaults to 0 (no maximum). Set a meaningful
number if you have fast processing, limited memory, and can't
afford the time required to wait at the end once you've finished
recording. Setting a number for this is helpful in early testing
to get notified of cases where writing to disk is a bottleneck
(you may get processing freezes from time to time as a result).
Consistently slow write times may indicate a need for a more
efficient file format, memory type, or just lower resolution in
time or space (ie fewer fps or smaller images).
'verbose_exit' is a boolean indicating if the writer should notify
you on exit if a backlog wait will be required, and if so once it
completes and how long it took. Defaults to True.
*args and **kwargs are the same as those for BlockingVideoWriter.
'''
super().__init__(*args, **kwargs)
self._initialise_writer(maxsize)
self._verbose_exit = verbose_exit
def _initialise_writer(self, maxsize):
''' Start the Thread for writing images from the queue. '''
self.max_queue_size = maxsize
self._write_queue = Queue(maxsize=maxsize)
self._image_writer = Thread(name='writer', target=self._writer,
daemon=True)
self._image_writer.start()
def _writer(self):
        ''' Write queued frames indefinitely (runs in a daemon thread until program exit). '''
while "not finished":
# retrieve an image, wait indefinitely if necessary
img = self._write_queue.get()
# write the image to file ('where' is specified outside)
super().write(img)
# inform the queue that a frame has been written
self._write_queue.task_done()
def write(self, img):
''' Send 'img' to the write queue. '''
self._write_queue.put(img)
def __exit__(self, *args):
''' Wait for writing to complete, and release writer. '''
# assume not waiting
waited = False
# check if waiting required
if self._verbose_exit and not self._write_queue.empty():
print(f'Writing {self._write_queue.qsize()} remaining frames.')
print('Force quitting may result in a corrupted video file.')
waited = perf_counter()
# finish writing all frames
self._write_queue.join()
# cleanup as normal
super().__exit__(*args)
# if wait occurred, inform of completion
if waited and self._verbose_exit:
print(f'Writing complete in {perf_counter()-waited:.3f}s.')
class GuaranteedVideoWriter(VideoWriter):
''' A VideoWriter with guaranteed output FPS.
Repeats frames when input too slow, and skips frames when input too fast.
'''
def _initialise_writer(self, maxsize):
''' Start the write-queue putter and getter threads. '''
super()._initialise_writer(maxsize)
self._period = 1 / self.fps
self.latest = None
self._finished = Event()
self._looper = Thread(name='looper', target=self._write_loop)
self._looper.start()
def _write_loop(self):
''' Write the latest frame to the queue, at self.fps.
Repeats frames when input too slow, and skips frames when input too fast.
'''
# wait until first image set, or early finish
while self.latest is None and not self._finished.is_set():
sleep(self._period / 2)
prev = perf_counter()
self._error = 0
delay = self._period - 5e-3
# write frames at specified rate until told to stop
while not self._finished.is_set():
super().write(self.latest)
new = perf_counter()
self._error += self._period - (new - prev)
delay -= self._error
delay = max(delay, 0) # can't go back in time
sleep(delay)
prev = new
def write(self, img):
''' Set the latest image. '''
self.latest = img
def __exit__(self, *args):
self._finished.set()
self._looper.join()
if self._verbose_exit:
print(f'Net timing error = {self._error * 1e3:.3f}ms')
super().__exit__(*args)
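# Illustrative sketch (not part of the original module): GuaranteedVideoWriter keeps
# re-writing the most recent frame at a fixed rate, so irregular calls to write()
# still yield a steady output fps. 'steady.avi' is a hypothetical path.
def _example_guaranteed_writer():
    from random import uniform
    fourcc = GuaranteedVideoWriter.suggested_codec('steady.avi')
    with GuaranteedVideoWriter('steady.avi', fourcc, fps=25,
                               frameSize=(320, 240)) as writer:
        for shade in range(0, 250, 25):
            writer.write(np.full((240, 320, 3), shade, dtype=np.uint8))
            sleep(uniform(0.0, 0.2))  # irregular input rate; output stays at 25 fps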
class OutOfFrames(StopIteration):
    def __init__(self, msg='Out of video frames', *args, **kwargs):
super().__init__(msg, *args, **kwargs)
class UserQuit(StopIteration):
    def __init__(self, msg='User quit manually', *args, **kwargs):
super().__init__(msg, *args, **kwargs)
class OpenCVSource(cv2.VideoCapture):
''' A class to provide opencv's VideoCapture as a VideoSource backend. '''
# more properties + descriptions can be found in the docs:
# https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html#gaeb8dd9c89c10a5c63c139bf7c4f5704d
properties = {
'fps' : cv2.CAP_PROP_FPS,
'mode' : cv2.CAP_PROP_MODE,
'width' : cv2.CAP_PROP_FRAME_WIDTH,
'height' : cv2.CAP_PROP_FRAME_HEIGHT,
'backend' : cv2.CAP_PROP_BACKEND,
}
def get(self, property):
try:
return super().get(self.properties.get(property, property))
except TypeError: # property must be an unknown string
            return super().get(getattr(cv2, 'CAP_PROP_' + property.upper()))
def set(self, property, value):
try:
return super().set(self.properties.get(property, property), value)
except TypeError: # 'property' must be an unknown string
return super().set(getattr(cv2, 'CAP_PROP_' + property.upper()), value)
class ContextualVideoCapture(VideoSource):
''' A video-capturing class with a context manager for releasing. '''
def __init__(self, id, *args, display='frame', delay=None, quit=ord('q'),
play_pause=ord(' '), pause_effects={}, play_commands={},
destroy=-1, source=OpenCVSource, **kwargs):
''' A pausable, quitable, iterable video-capture object
with context management.
'id' is the id that gets passed to the underlying VideoCapture object.
it can be an integer to select a connected camera, or a filename
to open a video.
'display' is used as the default window name when streaming. Defaults
to 'frame'.
'delay' is the integer millisecond delay applied between each iteration
to enable windows to update. If set to None, this is skipped and
the user must manually call waitKey to update windows.
Default is None, which allows headless operation without
unnecessary waiting.
'quit' is an integer ordinal corresponding to a key which can be used
to stop the iteration loop. Only applies if delay is not None.
Default is ord('q'), so press the 'q' key to quit when iterating.
'play_pause' is an integer ordinal corresponding to a key which can be
used to pause and resume the iteration loop. Only applies if delay
is not None. Default is ord(' '), so press space-bar to pause/
resume when iterating.
'pause_effects' is a dictionary of key ordinals and corresponding
handler functions. The handler will be passed self as its only
argument, which gives it access to the 'get' and 'set' methods,
as well as the 'status' and 'image' properties from the last 'read'
call. This can be useful for logging, selecting images for
labelling, or temporary manual control of the event/read loop.
Note that this is only used while paused, and does not get passed
quit or play_pause key events.
'play_commands' is the same as 'pause_effects' but operates instead
while playback/streaming is occurring. For live processing,
this can be used to change playback modes, or more generally for
similar scenarios as 'pause_effects'.
'destroy' destroys any specified windows on context exit. Can be 'all'
to destroy all active opencv windows, a string of a specific window
name, or a list of window names to close. If left as -1, destroys
the window specified in 'display'.
'source' is a class which acts like a video source, by implementing at
minimum the methods 'get', 'set', 'read', 'grab', 'retrieve',
'open', 'isOpened', and 'release'.
'''
super().__init__(source, id, *args, **kwargs)
self._id = id
self.display = display
self._delay = delay
self._quit = quit
self._play_pause = play_pause
self._pause_effects = pause_effects
self._play_commands = play_commands
self._destroy = destroy
self._api_preference = kwargs.get('apiPreference', None)
def __enter__(self, force=False):
''' Enter a re-entrant context for this camera. '''
if force or not self.isOpened():
if self._api_preference:
self.open(self._id, self._api_preference)
else:
self.open(self._id)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
''' Clean up on context exit.
Releases the internal VideoCapture object, and destroys any windows
specified at initialisation.
'''
# release VideoCapture object
self.release()
# clean up window(s) if specified on initialisation
destroy = self._destroy
try:
if destroy == -1:
cv2.destroyWindow(self.display)
elif destroy == 'all':
cv2.destroyAllWindows()
elif isinstance(destroy, str):
# a single window name
cv2.destroyWindow(destroy)
elif destroy is not None:
# assume an iterable of multiple windows
for window in destroy: cv2.destroyWindow(window)
else:
return # destroy is None
except cv2.error as e:
print('Failed to destroy window(s)', e)
waitKey(3) # allow the GUI manager to update
def __iter__(self):
return self
def __next__(self):
# check if doing automatic waits
if self._delay is not None:
key = waitKey(self._delay)
if key == self._quit:
raise UserQuit
elif key == self._play_pause:
self._handle_pause()
else:
# pass self to a triggered user-defined key handler, or nothing
self._play_commands.get(key, lambda cap: None)(self)
# wait completed, get next frame if possible
if self.isOpened():
return self.read()
raise OutOfFrames
def _handle_pause(self):
''' Handle event loop and key-presses while paused. '''
while "paused":
key = waitKey(1)
if key == self._quit:
raise UserQuit
if key == self._play_pause:
break
# pass self to a triggered user-defined key handler, or do nothing
self._pause_effects.get(key, lambda cap: None)(self)
def stream(self, mouse_handler=DoNothing()):
''' Capture and display stream on window specified at initialisation.
'mouse_handler' is an optional MouseCallback instance determining
the effects of mouse clicks and moves during the stream. Defaults
to DoNothing.
'''
# ensure mouse_handler has something to bind to
cv2.namedWindow(self.display)
# engage mouse_handler and start the stream
with mouse_handler:
for read_success, frame in self:
if read_success:
cv2.imshow(self.display, frame)
else:
break # camera disconnected
def headless_stream(self):
''' Capture and process stream without display. '''
for read_success, frame in self:
if not read_success: break # camera disconnected
def record_stream(self, filename, show=True, mouse_handler=DoNothing(),
writer=VideoWriter, **kwargs):
''' Capture and record stream, with optional display.
'filename' is the file to save to.
'show' is a boolean specifying if the result is displayed (on the
window specified at initialisation).
'mouse_handler' is an optional MouseCallback instance determining
the effects of mouse clicks and moves during the stream. It is only
useful if 'show' is set to True. Defaults to DoNothing.
'writer' is a subclass of VideoWriter. Defaults to VideoWriter.
Set to GuaranteedVideoWriter to allow repeated and skipped frames
to better ensure a consistent output framerate.
**kwargs are passed to the 'writer's from_camera method (e.g. can be used
to indicate the 'frameSize' or a greyscale output (isColor=False)).
'''
if show:
# ensure mouse_handler has something to bind to
cv2.namedWindow(self.display)
# create writer, engage mouse_handler, and start the stream
with writer.from_camera(filename, self, **kwargs) as writer, \
mouse_handler:
for read_success, frame in self:
if read_success:
if show:
cv2.imshow(self.display, frame)
writer.write(frame)
else:
break # camera disconnected
def read(self, image=None):
if image is not None:
status, image = super().read(image)
else:
status, image = super().read()
self.status, self.image = status, image
return status, image
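# Illustrative sketch (not part of the original module): streaming from camera 0 with
# custom key bindings. 's' while playing logs the last read status; 'c' while paused
# stores the current image. The names here (snapshots, the lambdas) are illustrative
# only, and the default OpenCVSource backend is assumed. Never invoked here.
def _example_contextual_capture():
    snapshots = []
    with ContextualVideoCapture(
        0,
        delay=1,  # a delay is required so waitKey runs and key bindings are checked
        play_commands={ord('s'): lambda cap: print('status:', getattr(cap, 'status', None))},
        pause_effects={ord('c'): lambda cap: snapshots.append(getattr(cap, 'image', None))},
    ) as cap:
        cap.stream()  # press space to pause/resume, 'q' to quit
    print(f'captured {len(snapshots)} snapshot(s)')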
class SlowCamera(ContextualVideoCapture):
''' A basic, slow camera class for processing frames relatively far apart.
Use 'Camera' instead unless you need to reduce power/CPU usage and the time
to read an image is insignificant in your processing pipeline.
'''
def __init__(self, camera_id=0, *args, delay=1, **kwargs):
''' Create a camera capture instance with the given id.
Arguments are the same as ContextualVideoCapture, but 'id' is replaced
with 'camera_id', and 'delay' is set to 1 by default instead of
None.
'''
super().__init__(camera_id, *args, delay=delay, **kwargs)
def measure_framerate(self, frames):
''' Measure framerate for specified number of frames. '''
count = 0
for read_success, frame in self:
if self.display:
cv2.imshow(self.display, frame)
count += 1
if count == 1:
start = perf_counter() # avoid timing opening the window
if count > frames:
# desired frames reached, set fps as average framerate
return count / (perf_counter() - start)
def __repr__(self):
return f"{self.__class__.__name__}(camera_id={self._id!r})"
class Camera(SlowCamera):
''' A camera for always capturing the latest frame, fast.
Use this instead of 'SlowCamera', unless you need to reduce power/CPU
usage, and the time to read an image is insignificant in your processing
pipeline.
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._initialise_grabber()
def _initialise_grabber(self):
''' Start the Thread for grabbing images. '''
self._finished = Event()
self._image_grabber = Thread(name='grabber', target=self._grabber,
daemon=True) # auto-kill when finished
self._image_grabber.start()
self._wait_for_grabber_start()
def _grabber(self):
''' Grab images as fast as possible - only latest gets processed. '''
while not self._finished.is_set():
self.grab()
def _wait_for_grabber_start(self):
''' Waits for a successful retrieve. Raises Exception after 50 attempts. '''
for check in range(50):
if self.retrieve()[0]: break
sleep(0.1)
else:
raise Exception(f'Failed to start {self.__class__.__name__}')
def __exit__(self, *args):
self._finished.set()
self._image_grabber.join()
super().__exit__(*args)
def read(self, image=None):
''' Read and return the latest available image. '''
if image is not None:
status, image = self.retrieve(image)
else:
status, image = self.retrieve()
self.status, self.image = status, image
return status, image
class LockedCamera(Camera):
''' A camera for semi-synchronously capturing a single image at a time.
Like 'Camera' but uses less power+CPU by only capturing images on request.
Allows specifying when each image should start being captured, then doing
some processing while the image is being grabbed and decoded (and
optionally pre-processed), before using it.
Images may be less recent than achieved with Camera, depending on when the
user starts the capture process within their processing pipeline, but can
also be more recent if started near the end of the pipeline (at the risk of
having to wait for the capturing to complete).
'''
def __init__(self, *args, preprocess=lambda img:img,
process=lambda img:img, **kwargs):
''' Create a camera capture instance with the given id.
'preprocess' is an optional function which takes an image and returns
a modified image, which gets applied to each frame on read.
Defaults to no preprocessing.
'process' is an optional function which takes an image and returns
a modified image, which gets applied to each preprocessed frame
after the next frame has been requested. Defaults to no processing.
*args and **kwargs are the same as for Camera.
'''
super().__init__(*args, **kwargs)
self._preprocess = preprocess
self._process = process
self._get_latest_image() # start getting the first image
def _initialise_grabber(self):
''' Create locks and start the grabber thread. '''
self._image_desired = Event()
self._image_ready = Event()
super()._initialise_grabber()
def _grabber(self):
''' Grab and preprocess images on demand, ready for later usage '''
while not self._finished.is_set():
self._wait_until_needed()
# read the latest frame
read_success, frame = super(ContextualVideoCapture, self).read()
# apply any desired pre-processing and store for main thread
self._preprocessed = self._preprocess(frame) if read_success \
else None
# inform that image is ready for access/main processing
self._inform_image_ready()
def _wait_for_grabber_start(self):
''' Not used - done automatically with Events. '''
pass
def _wait_until_needed(self):
''' Wait for main to request the next image. '''
self._image_desired.wait()
self._image_desired.clear()
def _inform_image_ready(self):
''' Inform main that next image is available. '''
self._image_ready.set()
def _get_latest_image(self):
''' Ask camera handler for next image. '''
self._image_desired.set()
def _wait_for_camera_image(self):
''' Wait until next image is available. '''
self._image_ready.wait()
self._image_ready.clear()
def __exit__(self, *args):
self._finished.set()
self._image_desired.set() # allow thread to reach finished check
super().__exit__(*args)
def read(self, image=None):
''' For optimal usage, tune _process to take the same amount of time
as getting the next frame.
'''
self._wait_for_camera_image()
preprocessed = self._preprocessed
self._get_latest_image()
if preprocessed is None:
self.status, self.image = False, None
else:
self.image = self._process(preprocessed)
if image is not None:
image = self.image
self.status = True
return self.status, self.image
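# Illustrative sketch (not part of the original module): with LockedCamera the next
# frame is grabbed and pre-processed in the background while the main thread works on
# the previous one - here greyscale conversion runs in the grabber thread and the
# blur runs in the main thread, overlapping with the next capture. Never invoked here.
def _example_locked_camera():
    with LockedCamera(
        0,
        preprocess=lambda img: cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
        process=lambda img: cv2.GaussianBlur(img, (5, 5), 0),
    ) as cam:
        for read_success, frame in cam:
            if not read_success:
                break  # camera disconnected
            cv2.imshow(cam.display, frame)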
class VideoReader(LockedCamera):
''' A class for reading video files. '''
properties = {
**OpenCVSource.properties,
'frame' : cv2.CAP_PROP_POS_FRAMES,
'codec' : cv2.CAP_PROP_FOURCC,
'timestamp' : cv2.CAP_PROP_POS_MSEC,
'num_frames' : cv2.CAP_PROP_FRAME_COUNT,
'proportion' : cv2.CAP_PROP_POS_AVI_RATIO,
}
FASTER, SLOWER, REWIND, FORWARD, RESET, RESTART = \
(ord(key) for key in 'wsadrb')
FORWARD_DIRECTION, REVERSE_DIRECTION = 1, -1
MIN_DELAY = 1 # integer milliseconds
def __init__(self, filename, *args, start=None, end=None, auto_delay=True,
fps=None, skip_frames=None, verbose=True, display='video',
**kwargs):
''' Initialise a video reader from the given file.
For default key-bindings see 'auto_delay' details.
'filename' is the string path of a video file. Depending on the file
format some features may not be available.
'start' and 'end' denote the respective times of playback, according
to the specified fps. They can be integers of milliseconds, or
strings of 'hours:minutes:seconds' (larger amounts can be left off
if 0, e.g. '5:10.35' for no hours). If left as None, the video
starts and ends at the first and last frames respectively.
It is expected that 'start' < 'end', or playback ends immediately.
'auto_delay' is a boolean specifying if the delay between frames should
be automatically adjusted during playback to match the specified
fps. Set to False if operating headless (not viewing the video), or
if manual control is desired while iterating over the video.
If set to False, sets 'destroy' to None if not otherwise set.
If True enables playback control with 'w' increasing playback
speed, 's' slowing it down, 'a' rewinding (only possible if
'skip_frames' is True), and 'd' returning to forwards playback.
The 'r' key can be pressed to reset to 1x speed and forwards
direction playback. 'a' and 'd' can be used while paused to step
back and forwards, regardless of skip_frames. 'b' can be used while
playing or paused to jump the video back to its starting point.
These defaults can be overridden using the 'play_commands' and
'pause_effects' keyword arguments, supplying a dictionary of key
ordinals that sets the desired behaviour. Note that the defaults
are set internally, so to turn them off the dictionary must be
used, with e.g. play_commands={ord('a'):lambda vid:None} to disable
rewinding.
'fps' is a float specifying the desired frames per second for playback.
If left as None the fps is read from file, or if that fails is set
to 25 by default. Value is ignored if 'auto_delay' is False.
'skip_frames' allows frames to be manually set, as required by reverse
or high speed playback. If left as None this is disallowed. If
'auto_delay' is True, any integer value can be set (suggested 0),
and the number of frames to skip at each iteration is determined
as part of the delay tuning. If 'auto_delay' is False, an integer
can be used as a consistent number of frames to skip at each
iteration (e.g. only read every 10th frame). Note that enabling
frame skipping can make playback jerky on devices and/or file
formats with slow video frame setting times, and inconsistent
skipping amounts with 'auto_delay' may cause issues with
time-dependent processing.
'verbose' is a boolean determining if status updates (e.g. initial fps,
and playback speed and direction changes) are printed. Defaults to
True.
*args and **kwargs get passed up the inheritance chain, with notable
keywords including the 'preprocess' and 'process' functions which
take an image and return a processed result (see LockedCamera),
the 'quit' and 'play_pause' key ordinals which are checked if
'auto_delay' is True, and the 'play_commands' and 'pause_effects'
dictionaries mapping key ordinals to desired functionality while
playing and paused (see ContextualVideoCapture documentation for
details).
'''
super().__init__(filename, *args, display=display, **kwargs)
self.filename = filename
self._fps = fps or self.fps or 25 # user-specified or auto-retrieved
self._period = 1e3 / self._fps
self._verbose = verbose
self.status = True
self._initialise_delay(auto_delay)
self._initialise_playback(start, end, skip_frames)
def _initialise_delay(self, auto_delay):
''' Determines the delay automatically, or leaves as None. '''
if auto_delay:
if self._fps == 0 or self._fps >= 1e3:
self.verbose_print('failed to determine fps, setting to 25')
self._period = 1e3 / 25
# set a bit low to allow image read times
self._delay = self._period - 5
else:
self._delay = int(self._period)
self.verbose_print('delay set automatically to',
f'{self._delay}ms from fps={self._fps}')
else:
self._delay = None
if self._destroy == -1:
self._destroy = None
def _initialise_playback(self, start, end, skip_frames):
''' Set up playback settings as specified. '''
self._wait_for_camera_image() # don't set frame while grabber running
self._set_start(start)
self._set_end(end)
self._skip_frames = skip_frames
self._direction = self.FORWARD_DIRECTION
self._speed = 1
self._adjusted_period = self._period
self._calculate_frames()
self._play_commands = {
self.FASTER : self._speed_up,
self.SLOWER : self._slow_down,
self.REWIND : self._go_back,
self.FORWARD : self._go_forward,
self.RESET : self._reset,
self.RESTART : self.restart,
**self._play_commands
}
# add step back and forward functionality if keys not already used
self._pause_effects = {
self.REWIND : self.step_back,
self.FORWARD : self.step_forward,
self.RESTART : self.restart,
**self._pause_effects
}
# ensure time between frames is ignored while paused
class LogDict(dict):
def get(this, *args, **kwargs):
self.reset_delay()
return dict.get(this, *args, **kwargs)
self._pause_effects = LogDict(self._pause_effects)
self._get_latest_image() # re-initialise as ready
def get(self, property):
return super().get(self.properties.get(property, property))
def set(self, property, value):
return super().set(self.properties.get(property, property), value)
def _set_start(self, start):
''' Set the start of the video to user specification, if possible. '''
self._frame = 0
if start is not None:
if self.set_timestamp(start):
self.verbose_print(f'starting at {start}')
else:
self.verbose_print('start specification failed, '
'starting at 0:00')
self._start = self._frame
def _set_end(self, end):
''' Set playback to end where specified by user. '''
if end is not None:
if isinstance(end, str):
self._end = self.timestamp_to_ms(end)
else:
self._end = end
self._end /= self._period # convert to number of frames
else:
self._end = self.get('num_frames') or np.inf
def verbose_print(self, *args, **kwargs):
if self._verbose:
print(*args, **kwargs)
# NOTE: key callbacks set as static methods for clarity/ease of reference
# VideoReader to be modified gets passed in (so that external functions
# can be used), so also having a reference to self would be confusing.
@staticmethod
def _speed_up(vid):
''' Increase the speed by 10% of the initial value. '''
vid._speed += 0.1
vid._register_speed_change()
@staticmethod
def _slow_down(vid):
''' Reduce the speed by 10% of the initial value. '''
vid._speed -= 0.1
vid._register_speed_change()
def _register_speed_change(self):
''' Update internals and print new speed. '''
self._calculate_period()
self.verbose_print(f'speed set to {self._speed:.1f}x starting fps')
def _calculate_period(self):
''' Determine the adjusted period given the speed. '''
self._adjusted_period = self._period / self._speed
self._calculate_timestep()
def _calculate_timestep(self):
''' Determine the desired timestep of each iteration. '''
self._timestep = self._adjusted_period * self._frames
def _calculate_frames(self):
''' Determine the number of frames to increment each iteration. '''
self._frames = (1 + self._skip_frames
if self._skip_frames is not None
else 1)
self._calculate_timestep()
def reset_delay(self):
''' Resets the delay between frames.
Use to avoid fast playback/frame skipping after pauses.
'''
self._prev = perf_counter() - (self._period - self.MIN_DELAY) / 1e3
@staticmethod
def _go_back(vid):
''' Set playback to backwards. '''
if vid._skip_frames is not None:
vid._direction = vid.REVERSE_DIRECTION
vid.verbose_print('Rewinding')
else:
vid.verbose_print('Cannot go backwards without skip_frames=True')
@staticmethod
def _go_forward(vid):
''' Set playback to go forwards. '''
vid._direction = vid.FORWARD_DIRECTION
vid.verbose_print('Going forwards')
@staticmethod
def _reset(vid):
''' Restore playback to 1x speed and forwards. '''
vid._speed = 1
vid._direction = vid.FORWARD_DIRECTION
vid._calculate_period()
vid.verbose_print('Going forwards with speed set to 1x starting fps '
f'({vid._fps:.2f})')
@staticmethod
def step_back(vid):
''' Take a step backwards. '''
# store existing state
old_state = (vid._skip_frames, vid._direction, vid._verbose)
# enable back-stepping if not currently permitted
vid._skip_frames = 0
# make sure no unnecessary prints trigger from playback keys
vid._verbose = False
# go back a step
vid._direction = vid.REVERSE_DIRECTION
next(vid)
# restore state
vid._skip_frames, vid._direction, vid._verbose = old_state
@staticmethod
def step_forward(vid):
''' Take a step forwards. '''
# store existing state
old_state = (vid._direction, vid._verbose)
# make sure no unnecessary prints trigger from playback keys
vid._verbose = False
# go forwards a step
vid._direction = vid.FORWARD_DIRECTION
next(vid)
# restore state
vid._direction, vid._verbose = old_state
@staticmethod
def restart(vid):
''' Attempts to continue playback from the start of the video.
Respects user-defined start-point from initialisation.
'''
vid.set_frame(vid._start)
@property
def fps(self):
''' The constant FPS assumed of the video file. '''
return self.get('fps')
@property
def frame(self):
''' Retrieve the current video frame. '''
self._frame = int(self.get('frame'))
return self._frame
def set_frame(self, frame):
''' Attempts to set the frame number, returns success.
'frame' is an integer >= 0. Setting past the last frame
either has no effect or ends the playback.
self.set_frame(int) -> bool
'''
if self.set('frame', frame):
self._frame = frame
return True
return False
@property
def timestamp(self):
        ''' Returns the current video timestamp as a human-readable string.
            The format is hours:minutes:seconds, or minutes:seconds if the
            video is under an hour (an unavailable timestamp reads as 00:00.000).
For the numerical ms value use self.get('timestamp') instead.
self.timestamp -> str
'''
# cv2.VideoCapture returns ms timestamp -> convert to meaningful time
seconds = self.get('timestamp') / 1000
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(round(minutes), 60)
if hours:
return f'{hours:02d}:{minutes:02d}:{seconds:06.3f}'
return f'{minutes:02d}:{seconds:06.3f}'
@property
def iso_timestamp(self):
        ''' Returns the current video timestamp as an ISO 8601 duration string.
            The format is PThh:mm:ss.sss (an unavailable timestamp reads as
            PT00:00:00.000).
For a human-readable timestamp use self.timestamp.
For a numerical ms value use self.get('timestamp') instead.
self.iso_timestamp -> str
'''
timestamp = self.timestamp
# check if hours not specified
if len(timestamp) == 9:
timestamp = '00:' + timestamp
return f'PT{timestamp}'
def set_timestamp(self, timestamp):
''' Attempts to set the timestamp as specified, returns success.
'timestamp' can be a float/integer of milliseconds, or a string
of 'hours:minutes:seconds', 'minutes:seconds', or 'seconds',
where all values can be integers or floats.
self.set_timestamp(str/float/int) -> bool
'''
ms = self.timestamp_to_ms(timestamp) if isinstance(timestamp, str) \
else timestamp
fps = self._fps
if fps == 0:
# fps couldn't be determined - set ms directly and hope
return self.set('timestamp', ms)
return self.set_frame(int(ms * fps / 1e3))
@staticmethod
def timestamp_to_ms(timestamp):
''' Converts a string timestamp of hours:minutes:seconds to ms.'''
return 1000 * sum(60 ** index * float(period) for index, period \
in enumerate(reversed(timestamp.split(':'))))
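    # Worked example (a sketch, not from the source): timestamp_to_ms('1:02:03.5')
    # splits into ['1', '02', '03.5'], reads the pieces right-to-left as seconds,
    # minutes and hours weighted by 60**0, 60**1 and 60**2, and returns
    # 1000 * (3.5 + 2 * 60 + 1 * 3600) = 3723500.0 ms.  set_timestamp() accepts
    # the same string forms ('ss', 'mm:ss' or 'hh:mm:ss').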
def __iter__(self):
if self._delay is not None:
self._prev = perf_counter()
self._error = 0
self._delay = 1
return self
def __next__(self):
if self._delay is not None:
# auto-adjust to get closer to desired fps
now = perf_counter()
diff = 1e3 * (now - self._prev) # s to ms
self._error += diff - self._timestep
self._update_playback_settings()
self._prev = now
self._update_frame_tracking()
read_success, frame = super().__next__()
if not read_success:
raise OutOfFrames
return read_success, frame
def _update_playback_settings(self):
''' Adjusts delay/frame skipping if error is sufficiently large. '''
error_magnitude = abs(self._error)
if error_magnitude > self.MIN_DELAY:
# determine distribution of change
if self._skip_frames is not None:
# can only skip full frames, rest left to delay
skip_frames_change, delay_change = \
divmod(error_magnitude, self._adjusted_period)
else:
delay_change = error_magnitude
# can only delay in MIN_DELAY increments, remainder is error
delay_change, new_error_mag = \
divmod(delay_change, self.MIN_DELAY)
# determine if going too slowly (+) or too fast (-)
sign = np.sign(self._error)
# implement delay (and skip frames) change
# reducing delay increases speed
self._delay -= int(sign * delay_change)
if self._skip_frames is not None:
# skipping additional frames increases speed
self._skip_frames += int(sign * skip_frames_change)
self._calculate_frames() # update internals
self._error = sign * new_error_mag
if self._delay < self.MIN_DELAY:
self._error += self.MIN_DELAY - self._delay
self._delay = self.MIN_DELAY
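    # Worked example (a sketch; MIN_DELAY is assumed to be 1 ms here): with
    # skip_frames enabled, adjusted_period = 40 ms and an accumulated error of
    # +3.5 ms (running slow), divmod(3.5, 40) gives no extra frames to skip and
    # 3.5 ms for the delay, then divmod(3.5, 1) trims 3 ms off the delay and
    # carries 0.5 ms over as the new error.  Running fast flips the sign, so the
    # same amounts are added back instead, and any shortfall below MIN_DELAY is
    # pushed back into the error term.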
def _update_frame_tracking(self):
# frame skip with no auto-delay allows continual frame skipping
# only set frame if necessary (moving one frame ahead isn't helpful)
if self._skip_frames is not None and \
(self._direction == -1 or self._frames != 1):
self._image_ready.wait()
self.set_frame(self._frame + self._frames * self._direction)
else:
self._frame += 1
if self.status == False or self._frame > self._end \
or self._frame < self._start:
raise OutOfFrames
def __repr__(self):
return f"{self.__class__.__name__}(filename={repr(self.filename)})"
if __name__ == '__main__':
with Camera(0) as cam:
cam.stream()
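# A minimal usage sketch (not part of the original script): assuming the playback
# class defined above is exposed as VideoReader and offers a stream() method like
# Camera does, it might be driven as
#
#     with VideoReader('video.mp4', start='0:10', end='1:30') as vid:
#         vid.stream()   # interactive playback using the key bindings above
#
# The constructor keywords shown here are assumptions based on
# _initialise_playback, _set_start and _set_end.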
|
test_pipeline_process.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import traceback
from django.test import TestCase
from pipeline.django_signal_valve import valve
from pipeline.engine import states, signals, exceptions
from pipeline.engine.models import Status
from pipeline.engine.utils import Stack
from pipeline.engine.models.core import PipelineProcess, ProcessSnapshot, SubProcessRelationship
from ..mock import * # noqa
from pipeline.tests.mock_settings import * # noqa
valve.unload_valve_function()
class TestPipelineProcess(TestCase):
def test_prepare_for_pipeline(self):
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
self.assertEqual(len(process.id), 32)
self.assertEqual(process.root_pipeline_id, pipeline.id)
self.assertEqual(process.current_node_id, pipeline.start_event.id)
self.assertIsNotNone(process.snapshot)
self.assertEqual(process.top_pipeline.id, pipeline.id)
def test_fork_child(self):
context = MockContext()
context.clear_change_keys = MagicMock()
pipeline = PipelineObject(context=context)
current_node_id = uniqid()
destination_id = uniqid()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
child = PipelineProcess.objects.fork_child(
parent=process,
current_node_id=current_node_id,
destination_id=destination_id
)
self.assertEqual(len(child.id), 32)
self.assertEqual(process.root_pipeline_id, child.root_pipeline_id)
self.assertEqual(process.pipeline_stack, child.pipeline_stack)
self.assertEqual(process.children, child.children)
self.assertEqual(process.root_pipeline.id, child.root_pipeline.id)
self.assertEqual(process.subprocess_stack, child.subprocess_stack)
self.assertEqual(process.id, child.parent_id)
self.assertEqual(child.current_node_id, current_node_id)
self.assertEqual(child.destination_id, destination_id)
self.assertEqual(context.clear_change_keys.call_count, 1)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_process_ready(self):
from pipeline.django_signal_valve.valve import send
process_id = uniqid()
current_node_id = uniqid()
PipelineProcess.objects.process_ready(process_id)
send.assert_called_with(
signals,
'process_ready',
sender=PipelineProcess,
process_id=process_id,
current_node_id=None,
call_from_child=False)
PipelineProcess.objects.process_ready(process_id, current_node_id, False)
send.assert_called_with(
signals,
'process_ready',
sender=PipelineProcess,
process_id=process_id,
current_node_id=current_node_id,
call_from_child=False)
PipelineProcess.objects.process_ready(process_id, current_node_id, True)
send.assert_called_with(
signals,
'process_ready',
sender=PipelineProcess,
process_id=process_id,
current_node_id=current_node_id,
call_from_child=True)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_batch_process_ready(self):
from pipeline.django_signal_valve.valve import send
process_id_list = [uniqid(), uniqid(), uniqid()]
pipeline_id = uniqid()
PipelineProcess.objects.batch_process_ready(process_id_list, pipeline_id)
send.assert_called_with(
signals,
'batch_process_ready',
sender=PipelineProcess,
process_id_list=process_id_list,
pipeline_id=pipeline_id
)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_child_process_ready(self):
from pipeline.django_signal_valve.valve import send
child_id = uniqid()
PipelineProcess.objects.child_process_ready(child_id)
send.assert_called_with(
signals,
'child_process_ready',
sender=PipelineProcess,
child_id=child_id
)
def test_properties(self):
process = PipelineProcess.objects.create()
pipeline_stack = Stack(['pipeline1', 'pipeline2'])
subprocess_stack = Stack(['subprocess1', 'subprocess2'])
children = ['child1', 'child2']
root_pipeline = 'root_pipeline'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=pipeline_stack,
children=children,
root_pipeline=root_pipeline,
subprocess_stack=subprocess_stack
)
process.snapshot = mock_snapshot
self.assertEqual(process.pipeline_stack, pipeline_stack)
self.assertEqual(process.children, children)
self.assertEqual(process.root_pipeline, root_pipeline)
self.assertEqual(process.top_pipeline, pipeline_stack.top())
self.assertEqual(process.subprocess_stack, subprocess_stack)
def test_push_pipeline(self):
pipeline = 'pipeline_%s' % uniqid()
subproc_pipeline = PipelineObject()
process = PipelineProcess.objects.create()
pipeline_stack = Stack(['pipeline1', 'pipeline2'])
subprocess_stack = Stack(['subprocess1', 'subprocess2'])
children = ['child1', 'child2']
root_pipeline = 'root_pipeline'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=pipeline_stack,
children=children,
root_pipeline=root_pipeline,
subprocess_stack=subprocess_stack
)
process.snapshot = mock_snapshot
process.id = uniqid()
process.push_pipeline(pipeline, is_subprocess=False)
self.assertEqual(process.top_pipeline, pipeline)
process.push_pipeline(subproc_pipeline, is_subprocess=True)
self.assertEqual(process.top_pipeline, subproc_pipeline)
self.assertTrue(
SubProcessRelationship.objects.filter(subprocess_id=subproc_pipeline.id, process_id=process.id).exists())
def test_pop_pipeline(self):
subproc_pipeline = PipelineObject()
process = PipelineProcess.objects.create()
pipeline_stack = Stack(['pipeline1', 'pipeline2'])
subprocess_stack = Stack(['subprocess1', 'subprocess2'])
children = ['child1', 'child2']
root_pipeline = 'root_pipeline'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=pipeline_stack,
children=children,
root_pipeline=root_pipeline,
subprocess_stack=subprocess_stack
)
process.snapshot = mock_snapshot
process.id = uniqid()
process.push_pipeline(subproc_pipeline, is_subprocess=True)
self.assertEqual(process.top_pipeline, subproc_pipeline)
self.assertTrue(
SubProcessRelationship.objects.filter(subprocess_id=subproc_pipeline.id, process_id=process.id).exists())
pop_pipeline = process.pop_pipeline()
self.assertEqual(pop_pipeline.id, subproc_pipeline.id)
self.assertFalse(
SubProcessRelationship.objects.filter(subprocess_id=subproc_pipeline.id, process_id=process.id).exists()
)
pop_pipeline = process.pop_pipeline()
self.assertEqual(pop_pipeline, 'pipeline2')
pop_pipeline = process.pop_pipeline()
self.assertEqual(pop_pipeline, 'pipeline1')
def test_join(self):
children = [IdentifyObject(), IdentifyObject(), IdentifyObject()]
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline='root_pipeline',
subprocess_stack=Stack()
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.join(children)
self.assertEqual(process.need_ack, len(children))
for i in range(len(children)):
self.assertEqual(process.children[i], children[i].id)
def test_root_sleep_check(self):
def return_suspended(*args, **kwargs):
return states.SUSPENDED
def return_revoked(*args, **kwargs):
return states.REVOKED
def return_blocked(*args, **kwargs):
return states.BLOCKED
another_status = MagicMock()
status = [states.CREATED, states.READY, states.RUNNING, states.FINISHED, states.FAILED]
another_status.side_effect = status
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack()
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended):
self.assertEqual(process.root_sleep_check(), (True, states.SUSPENDED))
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_revoked):
self.assertEqual(process.root_sleep_check(), (True, states.REVOKED))
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_blocked):
self.assertEqual(process.root_sleep_check(), (True, states.BLOCKED))
process.parent_id = 'parent_id'
self.assertEqual(process.root_sleep_check(), (False, states.BLOCKED))
with mock.patch(PIPELINE_STATUS_STATE_FOR, another_status):
for s in status:
self.assertEqual(process.root_sleep_check(), (False, s))
def test_subproc_sleep_check(self):
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([1, 2, 3, 4])
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
def return_all_running(*args, **kwargs):
return [
StatusObject(id=1, state=states.RUNNING),
StatusObject(id=2, state=states.RUNNING),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.RUNNING)
]
def return_one_suspended(*args, **kwargs):
return [
StatusObject(id=1, state=states.RUNNING),
StatusObject(id=2, state=states.SUSPENDED),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.RUNNING)
]
def return_first_suspended(*args, **kwargs):
return [
StatusObject(id=1, state=states.SUSPENDED),
StatusObject(id=2, state=states.RUNNING),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.RUNNING)
]
def return_last_suspended(*args, **kwargs):
return [
StatusObject(id=1, state=states.RUNNING),
StatusObject(id=2, state=states.RUNNING),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.SUSPENDED)
]
with mock.patch(PIPELINE_STATUS_FILTER, return_all_running):
self.assertEqual(process.subproc_sleep_check(), (False, [1, 2, 3, 4]))
with mock.patch(PIPELINE_STATUS_FILTER, return_one_suspended):
self.assertEqual(process.subproc_sleep_check(), (True, [1]))
with mock.patch(PIPELINE_STATUS_FILTER, return_first_suspended):
self.assertEqual(process.subproc_sleep_check(), (True, []))
with mock.patch(PIPELINE_STATUS_FILTER, return_last_suspended):
self.assertEqual(process.subproc_sleep_check(), (True, [1, 2, 3]))
@patch(PIPELINE_CELERYTASK_UNBIND, MagicMock())
def test_freeze(self):
from pipeline.engine.models import ProcessCeleryTask
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
self.assertFalse(process.is_frozen)
process.freeze()
self.assertTrue(process.is_frozen)
process.refresh_from_db()
self.assertTrue(process.is_frozen)
ProcessCeleryTask.objects.unbind.assert_called_with(process.id)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_unfreeze(self):
from pipeline.django_signal_valve.valve import send
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
process.freeze()
process.unfreeze()
self.assertFalse(process.is_frozen)
process.refresh_from_db()
self.assertFalse(process.is_frozen)
send.assert_called_with(
signals,
'process_unfreeze',
sender=PipelineProcess,
process_id=process.id
)
@patch(PIPELINE_PROCESS_ADJUST_STATUS, MagicMock())
@patch(PIPELINE_CELERYTASK_UNBIND, MagicMock())
def test_sleep(self):
from pipeline.engine.models import ProcessCeleryTask
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
process.sleep(do_not_save=True, adjust_status=True)
process.adjust_status.assert_called_with(None)
ProcessCeleryTask.objects.unbind.assert_not_called()
process.adjust_status.reset_mock()
process.sleep(do_not_save=True, adjust_status=True, adjust_scope=[1, 2, 3, 4])
process.adjust_status.assert_called_with([1, 2, 3, 4])
ProcessCeleryTask.objects.unbind.assert_not_called()
process.adjust_status.reset_mock()
process.sleep(do_not_save=False, adjust_status=False)
process.adjust_status.assert_not_called()
self.assertTrue(process.sleep)
ProcessCeleryTask.objects.unbind.assert_called_with(process.id)
with mock.patch(PIPELINE_PROCESS_CHILD_PROCESS_READY, MagicMock()):
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3, 4],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
process.snapshot = mock_snapshot
process.sleep(do_not_save=False, adjust_status=False)
PipelineProcess.objects.child_process_ready.assert_has_calls([
mock.call(1),
mock.call(2),
mock.call(3),
mock.call(4)
])
@patch(PIPELINE_STATUS_BATCH_TRANSIT, MagicMock())
@patch(PIPELINE_STATUS_TRANSIT, MagicMock())
def test_adjust_status(self):
process = PipelineProcess.objects.create()
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(id='root_pipeline_id'),
subprocess_stack=Stack([1, 2, 3, 4])
)
process.snapshot = mock_snapshot
process.current_node_id = 'current_node_id'
def return_suspended_for_node(id, may_not_exist=False):
if id == 'current_node_id':
return states.SUSPENDED
def return_failed_for_node(id, may_not_exist=False):
if id == 'current_node_id':
return states.FAILED
def return_suspended_for_root_pipeline(id, may_not_exist=False):
if id == 'root_pipeline_id':
return states.SUSPENDED
def return_none_for_node(*args, **kwargs):
return None
def return_empty_list_for_subproc(subprocess_stack):
return []
def return_all_running_for_subproc(subprocess_stack):
return [states.RUNNING, states.RUNNING, states.RUNNING, states.RUNNING]
def return_last_suspended_for_subproc(subprocess_stack):
return [states.RUNNING, states.RUNNING, states.RUNNING, states.SUSPENDED]
def return_one_suspended_for_subproc(subprocess_stack):
return [states.RUNNING, states.SUSPENDED, states.RUNNING, states.RUNNING]
node_state_possibility = [return_suspended_for_node, return_failed_for_node]
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_empty_list_for_subproc):
for case in node_state_possibility:
with mock.patch(PIPELINE_STATUS_STATE_FOR, case):
process.adjust_status()
Status.objects.batch_transit.assert_called_with(
id_list=[1, 2, 3, 4],
state=states.BLOCKED,
from_state=states.RUNNING
)
Status.objects.transit.assert_called_with(
'root_pipeline_id',
to_state=states.BLOCKED,
is_pipeline=True
)
Status.objects.batch_transit.reset_mock()
Status.objects.transit.reset_mock()
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended_for_root_pipeline):
process.adjust_status()
Status.objects.batch_transit.assert_called_with(
id_list=[1, 2, 3, 4],
state=states.SUSPENDED,
from_state=states.RUNNING
)
Status.objects.batch_transit.reset_mock()
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_none_for_node):
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_all_running_for_subproc):
process.adjust_status()
Status.objects.batch_transit.assert_not_called()
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_last_suspended_for_subproc):
process.adjust_status(adjust_scope=[1, 2, 3])
Status.objects.batch_transit.assert_called_with(
id_list=[1, 2, 3],
state=states.BLOCKED,
from_state=states.RUNNING
)
Status.objects.batch_transit.reset_mock()
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_one_suspended_for_subproc):
process.adjust_status(adjust_scope=[1])
Status.objects.batch_transit.assert_called_with(
id_list=[1],
state=states.BLOCKED,
from_state=states.RUNNING
)
Status.objects.batch_transit.reset_mock()
def test_wake_up(self):
process = PipelineProcess.objects.create()
process.is_sleep = True
process.save()
self.assertTrue(process.is_sleep)
process.wake_up()
self.assertFalse(process.is_sleep)
@patch(PIPELINE_CELERYTASK_DESTROY, MagicMock())
def test_destroy(self):
from pipeline.engine.models import ProcessCeleryTask
process = PipelineProcess.objects.create()
process.id = uniqid()
process.current_node_id = 'current_node_id'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3, 4],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
mock_snapshot.delete = MagicMock()
process.snapshot = mock_snapshot
process.destroy()
self.assertFalse(process.is_alive)
self.assertEqual(process.current_node_id, '')
self.assertIsNone(process.snapshot)
mock_snapshot.delete.assert_called()
ProcessCeleryTask.objects.destroy.assert_called_with(process.id)
def test_save(self):
process = PipelineProcess.objects.create()
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3, 4],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
mock_snapshot.save = MagicMock()
process.snapshot = mock_snapshot
process.save(save_snapshot=False)
mock_snapshot.save.assert_not_called()
process.save(save_snapshot=True)
mock_snapshot.save.assert_called()
mock_snapshot.save.reset_mock()
process.save()
mock_snapshot.save.assert_called()
def test_blocked_by_failure_or_suspended(self):
process = PipelineProcess.objects.create()
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
process.snapshot = mock_snapshot
def return_suspended(*args, **kwargs):
return states.SUSPENDED
def return_failed(*args, **kwargs):
return states.FAILED
def return_none(*args, **kwargs):
return None
class MockChild(object):
def __init__(self, failed=False, suspended=False):
self.failed = failed
self.suspended = suspended
def blocked_by_failure_or_suspended(self):
return self.failed or self.suspended
def return_child_no_anomaly(*args, **kwargs):
return [MockChild(), MockChild(), MockChild()]
def return_child_has_failed(*args, **kwargs):
return [MockChild(), MockChild(), MockChild(failed=True)]
def return_child_has_suspended(*args, **kwargs):
return [MockChild(), MockChild(), MockChild(suspended=True)]
process.is_sleep = False
self.assertFalse(process.blocked_by_failure_or_suspended())
        # the current node has already failed
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_failed):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
        # the current node has been suspended
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
        # the whole pipeline has entered the SUSPENDED state and the next node has not started executing
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_none):
process.is_sleep = True
self.assertFalse(process.blocked_by_failure_or_suspended())
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
process.snapshot = mock_snapshot
        # none of the child processes is abnormal
with mock.patch(PIPELINE_PROCESS_FILTER, return_child_no_anomaly):
process.is_sleep = True
self.assertFalse(process.blocked_by_failure_or_suspended())
        # one of the child processes has failed
with mock.patch(PIPELINE_PROCESS_FILTER, return_child_has_failed):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
        # one of the child processes has been suspended
with mock.patch(PIPELINE_PROCESS_FILTER, return_child_has_suspended):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
def test_sync_with_children(self):
outputs = {'output_key': 'output_value'}
        variables = {'variable_key': 'variable_value'}
process = PipelineProcess.objects.create()
context = Object()
context.update_global_var = MagicMock()
context.sync_change = MagicMock()
data = Object()
data.update_outputs = MagicMock()
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack([PipelineObject(context=context, data=data)]),
'_children': [1, 2, 3, 4],
'_root_pipeline': IdentifyObject(),
'_subprocess_stack': Stack([])
}
)
process.snapshot = mock_snapshot
process.clean_children = MagicMock()
def return_none(*args, **kwargs):
return None
def return_mock(id):
if id.endswith('data'):
return DataObject(outputs=outputs)
if id.endswith('context'):
return ContextObject(variables=variables)
with mock.patch(PIPELINE_ENGINE_CORE_DATA_GET_OBJECT, return_none):
self.assertRaises(exceptions.ChildDataSyncError, process.sync_with_children)
with mock.patch(PIPELINE_ENGINE_CORE_DATA_GET_OBJECT, return_mock):
process.sync_with_children()
context.sync_change.assert_called()
data.update_outputs.assert_called_with(outputs)
process.clean_children.assert_called()
@patch(PIPELINE_ENGINE_CORE_DATA_SET_OBJECT, MagicMock())
@patch(PIPELINE_PROCESS_BLOCKED_BY_FAILURE, MagicMock())
@patch(PIPELINE_PROCESS_DESTROY, MagicMock())
@patch(PIPELINE_PROCESS_PROCESS_READY, MagicMock())
@patch(PIPELINE_STATUS_BATCH_TRANSIT, MagicMock())
@patch(PIPELINE_STATUS_TRANSIT, MagicMock())
def test_destroy_and_wake_up_parent(self):
context = MockContext()
context.clear_change_keys = MagicMock()
pipeline = PipelineObject(context=context)
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
children = []
for i in range(3):
children.append(process.__class__.objects.fork_child(process, 'current_node_id', 'destination_id'))
process.join(children)
# def worker(child):
# child.destroy_and_wake_up_parent(child.destination_id)
for child in children:
child.destroy_and_wake_up_parent(child.destination_id)
# sys_processes.append(Process(target=worker, args=(child,)))
# for p in sys_processes:
# p.start()
#
# for p in sys_processes:
# p.join()
process.refresh_from_db()
self.assertEqual(process.need_ack, -1)
self.assertEqual(process.ack_num, 0)
self.assertEqual(PipelineProcess.blocked_by_failure_or_suspended.call_count, 2)
PipelineProcess.objects.process_ready.assert_called_once()
self.assertEqual(PipelineProcess.destroy.call_count, 3)
def test__context_key(self):
process = PipelineProcess.objects.create()
process.id = uniqid()
self.assertEqual(process._context_key(), '{}_context'.format(process.id))
self.assertEqual(process._context_key(process_id='another_id'), '{}_context'.format('another_id'))
def test__data_key(self):
process = PipelineProcess.objects.create()
process.id = uniqid()
self.assertEqual(process._data_key(), '{}_data'.format(process.id))
self.assertEqual(process._data_key(process_id='another_id'), '{}_data'.format('another_id'))
def test_can_be_waked(self):
process = PipelineProcess.objects.create()
process.is_sleep = False
process.is_alive = False
self.assertFalse(process.can_be_waked())
process.is_sleep = True
process.is_alive = False
self.assertFalse(process.can_be_waked())
process.is_sleep = False
process.is_alive = True
self.assertFalse(process.can_be_waked())
process.is_sleep = True
process.is_alive = True
process.need_ack = 3
process.ack_num = 2
self.assertFalse(process.can_be_waked())
process.need_ack = 3
process.ack_num = 3
self.assertTrue(process.can_be_waked())
process.need_ack = -1
self.assertTrue(process.can_be_waked())
@patch(PIPELINE_ENGINE_CORE_DATA_DEL_OBJECT, MagicMock())
def test_clean_children(self):
from pipeline.engine.core.data import del_object
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': ['1', '2', '3'],
'_root_pipeline': IdentifyObject(),
'_subprocess_stack': Stack([])
}
)
mock_snapshot.clean_children = MagicMock()
mock_snapshot.save = MagicMock()
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.clean_children()
del_object.assert_has_calls([
mock.call(process._context_key('1')),
mock.call(process._data_key('1')),
mock.call(process._context_key('2')),
mock.call(process._data_key('2')),
mock.call(process._context_key('3')),
mock.call(process._data_key('3')),
])
mock_snapshot.clean_children.assert_called()
mock_snapshot.save.assert_called()
@patch(PIPELINE_STATUS_FAIL, MagicMock())
@patch(PIPELINE_STATUS_RAW_FAIL, MagicMock())
def test_exit_gracefully(self):
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': ['1', '2', '3'],
'_root_pipeline': PipelineObject(),
'_subprocess_stack': Stack([])
}
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.sleep = MagicMock()
e = Exception('test')
process.current_node_id = uniqid()
process.exit_gracefully(e)
Status.objects.fail.assert_called_with(process.current_node_id, ex_data=traceback.format_exc(e))
Status.objects.raw_fail.assert_not_called()
process.sleep.assert_called_with(adjust_status=True)
Status.objects.fail.reset_mock()
process.sleep.reset_mock()
# when stack is not empty
mock_snapshot.data['_pipeline_stack'] = Stack([PipelineObject()])
process.current_node_id = uniqid()
process.exit_gracefully(e)
Status.objects.fail.assert_called_with(process.current_node_id, ex_data=traceback.format_exc(e))
Status.objects.raw_fail.assert_not_called()
process.sleep.assert_called_with(adjust_status=True)
Status.objects.fail.reset_mock()
process.sleep.reset_mock()
# when current_node is none
top_pipeline = PipelineObject()
top_pipeline.node = MagicMock(return_value=None)
mock_snapshot.data['_pipeline_stack'] = Stack([top_pipeline])
process.current_node_id = uniqid()
process.exit_gracefully(e)
Status.objects.fail.assert_not_called()
Status.objects.raw_fail.assert_called_with(process.current_node_id, ex_data=traceback.format_exc(e))
process.sleep.assert_called_with(adjust_status=True)
def test_refresh_current_node(self):
node_id = uniqid()
process = PipelineProcess.objects.create()
process.refresh_current_node(node_id)
process.refresh_from_db()
self.assertEqual(process.current_node_id, node_id)
@patch(PIPELINE_STATUS_BATCH_TRANSIT, MagicMock())
def test_revoke_subprocess(self):
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': [],
'_root_pipeline': PipelineObject(),
'_subprocess_stack': Stack([1, 2, 3, 4])
}
)
process = PipelineProcess.objects.create(id=uniqid())
process.snapshot = mock_snapshot
process.sleep = MagicMock()
process.revoke_subprocess()
Status.objects.batch_transit.assert_called_with(id_list=[1, 2, 3, 4], state=states.REVOKED)
child_1 = Object()
child_2 = Object()
child_3 = Object()
child_1.revoke_subprocess = MagicMock()
child_2.revoke_subprocess = MagicMock()
child_3.revoke_subprocess = MagicMock()
def get_child(id):
return {
1: child_1,
2: child_2,
3: child_3
}[id]
mock_snapshot.data['_children'] = [1, 2, 3]
with mock.patch(PIPELINE_PROCESS_GET, get_child):
process.revoke_subprocess()
Status.objects.batch_transit.assert_called_with(id_list=[1, 2, 3, 4], state=states.REVOKED)
child_1.revoke_subprocess.assert_called()
child_2.revoke_subprocess.assert_called()
child_3.revoke_subprocess.assert_called()
# test when subprocess_stack and children return None
process = PipelineProcess.objects.create(id=uniqid())
self.assertIsNone(process.subprocess_stack)
self.assertIsNone(process.children)
process.revoke_subprocess()
@patch(PIPELINE_PROCESS_DESTROY, MagicMock())
def test_destroy_all(self):
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': [],
'_root_pipeline': PipelineObject(),
'_subprocess_stack': Stack([])
}
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.is_alive = False
process.destroy_all()
process.destroy.assert_not_called()
process.is_alive = True
process.destroy_all()
process.destroy.assert_called()
process.destroy.reset_mock()
mock_snapshot.data['_children'] = [1, 2, 3]
child_1 = Object()
child_1.children = []
child_1.destroy = MagicMock()
child_1.is_alive = True
child_2 = Object()
child_2.children = []
child_2.destroy = MagicMock()
child_2.is_alive = False
child_3 = Object()
child_3.children = [1]
child_3.destroy = MagicMock()
child_3.is_alive = True
def get_child(id):
return {
1: child_1,
2: child_2,
3: child_3
}[id]
with mock.patch(PIPELINE_PROCESS_GET, get_child):
process.destroy_all()
child_1.destroy.assert_called()
child_2.destroy.assert_not_called()
child_3.destroy.assert_called()
self.assertEqual(child_1.destroy.call_count, 2)
def test_in_subprocess__true(self):
snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack([1, 2]),
}
)
process = PipelineProcess()
process.snapshot = snapshot
self.assertTrue(process.in_subprocess)
def test_in_subprocess__false(self):
snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack([1]),
}
)
process = PipelineProcess()
process.snapshot = snapshot
self.assertFalse(process.in_subprocess)
|
test_utils.py
|
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import copy
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import textwrap
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from io import BytesIO
from enum import Enum
import numpy as np
try:
import pandas as pd
except ImportError: # pragma: no cover
pd = None
import pytest
from mars import utils
from mars.core import tile
import mars.tensor as mt
def test_string_conversion():
s = None
assert utils.to_binary(s) is None
assert utils.to_str(s) is None
assert utils.to_text(s) is None
s = 'abcdefg'
assert isinstance(utils.to_binary(s), bytes)
assert utils.to_binary(s) == b'abcdefg'
assert isinstance(utils.to_str(s), str)
assert utils.to_str(s) == 'abcdefg'
assert isinstance(utils.to_text(s), str)
assert utils.to_text(s) == u'abcdefg'
ustr = type('ustr', (str,), {})
assert isinstance(utils.to_str(ustr(s)), str)
assert utils.to_str(ustr(s)) == 'abcdefg'
s = b'abcdefg'
assert isinstance(utils.to_binary(s), bytes)
assert utils.to_binary(s) == b'abcdefg'
assert isinstance(utils.to_str(s), str)
assert utils.to_str(s) == 'abcdefg'
assert isinstance(utils.to_text(s), str)
assert utils.to_text(s) == u'abcdefg'
ubytes = type('ubytes', (bytes,), {})
assert isinstance(utils.to_binary(ubytes(s)), bytes)
assert utils.to_binary(ubytes(s)) == b'abcdefg'
s = u'abcdefg'
assert isinstance(utils.to_binary(s), bytes)
assert utils.to_binary(s) == b'abcdefg'
assert isinstance(utils.to_str(s), str)
assert utils.to_str(s) == 'abcdefg'
assert isinstance(utils.to_text(s), str)
assert utils.to_text(s) == u'abcdefg'
uunicode = type('uunicode', (str,), {})
assert isinstance(utils.to_text(uunicode(s)), str)
assert utils.to_text(uunicode(s)) == u'abcdefg'
with pytest.raises(TypeError):
utils.to_binary(utils)
with pytest.raises(TypeError):
utils.to_str(utils)
with pytest.raises(TypeError):
utils.to_text(utils)
def test_tokenize():
import shutil
import tempfile
class TestEnum(Enum):
VAL1 = 'val1'
tempdir = tempfile.mkdtemp('mars_test_utils_')
try:
filename = os.path.join(tempdir, 'test_npa.dat')
mmp_array = np.memmap(filename, dtype=float, mode='w+', shape=(3, 4))
mmp_array[:] = np.random.random((3, 4)).astype(float)
mmp_array.flush()
del mmp_array
mmp_array1 = np.memmap(filename, dtype=float, shape=(3, 4))
mmp_array2 = np.memmap(filename, dtype=float, shape=(3, 4))
try:
v = [1, 2.3, '456', u'789', b'101112', 2147483649, None, np.ndarray,
[912, 'uvw'], np.arange(0, 10), np.array(10), np.array([b'\x01\x32\xff']),
np.int64, TestEnum.VAL1]
copy_v = copy.deepcopy(v)
assert (utils.tokenize(v + [mmp_array1], ext_data=1234)
== utils.tokenize(copy_v + [mmp_array2], ext_data=1234))
finally:
del mmp_array1, mmp_array2
finally:
shutil.rmtree(tempdir)
v = {'a', 'xyz', 'uvw'}
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
v = dict(x='abcd', y=98765)
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
v = dict(x=dict(a=1, b=[1, 2, 3]), y=12345)
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
# pandas relative
if pd is not None:
df = pd.DataFrame([[utils.to_binary('测试'), utils.to_text('数据')]],
index=['a'], columns=['中文', 'data'])
v = [df, df.index, df.columns, df['data'], pd.Categorical(list('ABCD'))]
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
class NonTokenizableCls:
def __getstate__(self):
raise SystemError
with pytest.raises(TypeError):
utils.tokenize(NonTokenizableCls())
class CustomizedTokenize(object):
def __mars_tokenize__(self):
return id(type(self)), id(NonTokenizableCls)
assert utils.tokenize(CustomizedTokenize()) == utils.tokenize(CustomizedTokenize())
v = lambda x: x + 1
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
def f(a, b):
return np.add(a, b)
assert utils.tokenize(f) == utils.tokenize(copy.deepcopy(f))
partial_f = partial(f, 1, k=0)
partial_f2 = partial(f, 1, k=1)
assert utils.tokenize(partial_f) == utils.tokenize(copy.deepcopy(partial_f))
assert utils.tokenize(partial_f) != utils.tokenize(partial_f2)
def test_lazy_import():
old_sys_path = sys.path
mock_mod = textwrap.dedent("""
__version__ = '0.1.0b1'
""".strip())
temp_dir = tempfile.mkdtemp(prefix='mars-utils-test-')
sys.path += [temp_dir]
try:
with open(os.path.join(temp_dir, 'test_mod.py'), 'w') as outf:
outf.write(mock_mod)
non_exist_mod = utils.lazy_import('non_exist_mod', locals=locals())
assert non_exist_mod is None
mod = utils.lazy_import(
'test_mod', globals=globals(), locals=locals(), rename='mod')
assert mod is not None
assert mod.__version__ == '0.1.0b1'
glob = globals().copy()
mod = utils.lazy_import(
'test_mod', globals=glob, locals=locals(), rename='mod')
glob['mod'] = mod
assert mod is not None
assert mod.__version__ == '0.1.0b1'
assert type(glob['mod']).__name__ == 'module'
finally:
shutil.rmtree(temp_dir)
sys.path = old_sys_path
def test_chunks_indexer():
a = mt.ones((3, 4, 5), chunk_size=2)
a = tile(a)
assert a.chunk_shape == (2, 2, 3)
with pytest.raises(ValueError):
_ = a.cix[1]
with pytest.raises(ValueError):
_ = a.cix[1, :]
chunk_key = a.cix[0, 0, 0].key
expected = a.chunks[0].key
assert chunk_key == expected
chunk_key = a.cix[1, 1, 1].key
expected = a.chunks[9].key
assert chunk_key == expected
chunk_key = a.cix[1, 1, 2].key
expected = a.chunks[11].key
assert chunk_key == expected
chunk_key = a.cix[0, -1, -1].key
expected = a.chunks[5].key
assert chunk_key == expected
chunk_key = a.cix[0, -1, -1].key
expected = a.chunks[5].key
assert chunk_key == expected
chunk_keys = [c.key for c in a.cix[0, 0, :]]
expected = [c.key for c in [a.cix[0, 0, 0], a.cix[0, 0, 1], a.cix[0, 0, 2]]]
assert chunk_keys == expected
chunk_keys = [c.key for c in a.cix[:, 0, :]]
expected = [c.key for c in [a.cix[0, 0, 0], a.cix[0, 0, 1], a.cix[0, 0, 2],
a.cix[1, 0, 0], a.cix[1, 0, 1], a.cix[1, 0, 2]]]
assert chunk_keys == expected
chunk_keys = [c.key for c in a.cix[:, :, :]]
expected = [c.key for c in a.chunks]
assert chunk_keys == expected
def test_insert_reversed_tuple():
assert utils.insert_reversed_tuple((), 9) == (9,)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 9) == (9, 7, 4, 3, 1)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 6) == (7, 6, 4, 3, 1)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 4) == (7, 4, 3, 1)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 0) == (7, 4, 3, 1, 0)
def test_require_not_none():
@utils.require_not_none(1)
def should_exist():
pass
assert should_exist is not None
@utils.require_not_none(None)
def should_not_exist():
pass
assert should_not_exist is None
@utils.require_module('numpy.fft')
def should_exist_np():
pass
assert should_exist_np is not None
@utils.require_module('numpy.fft_error')
def should_not_exist_np():
pass
assert should_not_exist_np is None
def test_type_dispatcher():
dispatcher = utils.TypeDispatcher()
type1 = type('Type1', (), {})
type2 = type('Type2', (type1,), {})
type3 = type('Type3', (), {})
dispatcher.register(object, lambda x: 'Object')
dispatcher.register(type1, lambda x: 'Type1')
dispatcher.register('pandas.DataFrame', lambda x: 'DataFrame')
assert 'Type1' == dispatcher(type2())
assert 'DataFrame' == dispatcher(pd.DataFrame())
assert 'Object' == dispatcher(type3())
dispatcher.unregister(object)
with pytest.raises(KeyError):
dispatcher(type3())
def test_fixed_size_file_object():
arr = [str(i).encode() * 20 for i in range(10)]
bts = os.linesep.encode().join(arr)
bio = BytesIO(bts)
ref_bio = BytesIO(bio.read(100))
bio.seek(0)
ref_bio.seek(0)
fix_bio = utils.FixedSizeFileObject(bio, 100)
assert ref_bio.readline() == fix_bio.readline()
assert ref_bio.tell() == fix_bio.tell()
pos = ref_bio.tell() + 10
assert ref_bio.seek(pos) == fix_bio.seek(pos)
assert ref_bio.read(5) == fix_bio.read(5)
assert ref_bio.readlines(25) == fix_bio.readlines(25)
assert list(ref_bio) == list(fix_bio)
def test_timer():
with utils.Timer() as timer:
time.sleep(0.1)
assert timer.duration >= 0.1
def test_quiet_stdio():
old_stdout, old_stderr = sys.stdout, sys.stderr
class _IOWrapper:
def __init__(self, name=None):
self.name = name
self.content = ''
@staticmethod
def writable():
return True
def write(self, d):
self.content += d
return len(d)
stdout_w = _IOWrapper('stdout')
stderr_w = _IOWrapper('stderr')
executor = ThreadPoolExecutor(1)
try:
sys.stdout = stdout_w
sys.stderr = stderr_w
with utils.quiet_stdio():
with utils.quiet_stdio():
assert sys.stdout.writable()
assert sys.stderr.writable()
print('LINE 1', end='\n')
print('LINE 2', file=sys.stderr, end='\n')
executor.submit(print, 'LINE T').result()
print('LINE 3', end='\n')
print('LINE 1', end='\n')
print('LINE 2', file=sys.stderr, end='\n')
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
assert stdout_w.content == 'LINE T\nLINE 1\n'
assert stderr_w.content == 'LINE 2\n'
@pytest.mark.asyncio
@pytest.mark.skipif(sys.version_info[:2] < (3, 7),
reason='asyncio task timeout detector is not supported on python versions below 3.7')
async def test_asyncio_task_timeout_detector():
log_file_name = 'test_asyncio_task_timeout_detector.log'
try:
os.environ['MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL'] = '1'
p = multiprocessing.Process(target=_run_task_timeout_detector, args=(log_file_name,))
p.start()
while p.is_alive():
await asyncio.sleep(0.1)
with open(log_file_name, 'r') as f:
detector_log = f.read()
assert 'timeout_func' in detector_log
finally:
os.environ.pop('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL')
if os.path.exists(log_file_name):
os.remove(log_file_name)
def _run_task_timeout_detector(log_file_name):
from ..utils import logger, register_asyncio_task_timeout_detector
fh = logging.FileHandler(log_file_name)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
async def timeout_func():
await asyncio.sleep(2)
async def main():
task = register_asyncio_task_timeout_detector()
await asyncio.create_task(timeout_func())
task.cancel()
asyncio.run(main())
def test_module_placeholder():
required_module = utils.ModulePlaceholder('required_module')
with pytest.raises(AttributeError):
required_module()
with pytest.raises(AttributeError) as e:
required_module.method()
msg = e.value.args[0]
assert msg == 'required_module is required but not installed.'
def test_merge_dict():
from ..utils import merge_dict
assert merge_dict({}, {1: 2}) == {1: 2}
assert merge_dict({1: 2}, {}) == {1: 2}
assert merge_dict({'a': {1: 2}, 'b': {2: 3}, 'c': {1: {2: 3}}},
{'a': {1: 3}, 'b': {2: 3}, 'c': {1: {2: 4}}}) ==\
{'a': {1: 3}, 'b': {2: 3}, 'c': {1: {2: 4}}}
with pytest.raises(ValueError):
merge_dict({'a': {1: 2}, 'b': {2: 3}}, {'a': {1: 3}}, overwrite=False)
def test_flatten_dict_to_nested_dict():
from ..utils import flatten_dict_to_nested_dict
assert flatten_dict_to_nested_dict({}) == {}
with pytest.raises(ValueError):
flatten_dict_to_nested_dict({'a.b.c': 1, 'a.b': 2})
assert flatten_dict_to_nested_dict({'a.b.c': 1, 'a.b.d': 2}) == {'a': {'b': {'c': 1, 'd': 2}}}
def test_readable_size():
assert utils.readable_size(32) == '32.00'
assert utils.readable_size(14354) == '14.02K'
assert utils.readable_size(14354000) == '13.69M'
assert utils.readable_size(14354000000) == '13.37G'
assert utils.readable_size(14354000000000) == '13.05T'
|
deepnano2_caller_gpu.py
|
#!/usr/bin/env python
from ont_fast5_api.fast5_interface import get_fast5_file
import argparse
import os
import sys
import numpy as np
import datetime
import deepnano2
from torch import multiprocessing as mp
from torch.multiprocessing import Pool
import torch
from deepnano2.gpu_model import Net
from tqdm import tqdm
# TODO: change cuda:0 into something better
step = 550
pad = 25
reads_in_group = 100
torch.set_grad_enabled(False)
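# A hedged note on the chunk geometry used below (an inference, not stated in the
# source): each window handed to the network spans 3*step + 6*pad = 1800 raw
# samples, consecutive windows start 3*step = 1650 samples apart, and the
# finalizer trims `pad` rows from each end of the network output.  That lines up
# exactly if the model downsamples the signal by a factor of 3: 600 output rows
# per window, trimmed to 550, with successive windows advancing by 550 output rows.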
def caller(model, qin, qout):
while True:
item = qin.get()
if item is None:
qout.put(None)
break
read_marks, batch = item
net_result = model(batch)
qout.put((read_marks, net_result))
item = qin.get()
def finalizer(fn, qin):
fo = open(fn, "w")
alph = np.array(["N", "A", "C", "G", "T"])
cur_rows = []
while True:
item = qin.get()
if item is None:
if len(cur_rows) > 0:
stack = np.vstack(cur_rows)
seq = deepnano2.beam_search_py(stack, 5, 0.1)
print(seq, file=fo)
return
read_marks, res = item
res = res.to(device='cpu', dtype=torch.float32).numpy()
for read_mark, row in zip(read_marks, res):
if read_mark is not None:
if len(cur_rows) > 0:
stack = np.vstack(cur_rows)
seq = deepnano2.beam_search_py(stack, 5, 0.1)
print(seq, file=fo)
cur_rows = []
print(">%s" % read_mark, file=fo)
cur_rows.append(row[pad:-pad])
def med_mad(x, factor=1.4826):
"""
Calculate signal median and median absolute deviation
"""
med = np.median(x)
mad = np.median(np.absolute(x - med)) * factor
return med, mad
def rescale_signal(signal):
signal = signal.astype(np.float32)
med, mad = med_mad(signal)
signal -= med
signal /= mad
return np.clip(signal, -2.5, 2.5)
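# Worked example (a sketch, not from the source): for x = [1, 2, 3, 4, 100] the
# median is 3 and the median absolute deviation is 1, so med_mad(x) returns
# (3.0, 1.4826).  rescale_signal() therefore centres the raw signal on its
# median, scales it by the scaled MAD, and clips to [-2.5, 2.5] so extreme
# outliers cannot dominate the network input.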
if __name__ == '__main__':
mp.set_start_method('spawn')
parser = argparse.ArgumentParser(description='Fast caller for ONT reads')
parser.add_argument('--directory', type=str, nargs='*', help='One or more directories with reads')
parser.add_argument('--reads', type=str, nargs='*', help='One or more read files')
parser.add_argument("--output", type=str, required=True, help="Output FASTA file name")
parser.add_argument("--threads", type=int, default=1, help="Number of threads for basecalling")
parser.add_argument("--weights", type=str, default=None, help="Path to network weights")
parser.add_argument("--network-type", choices=["fast", "accurate"], default="fast")
parser.add_argument("--beam-size", type=int, default=None, help="Beam size (defaults 5 for fast and 20 for accurate. Use 1 to disable.")
parser.add_argument("--beam-cut-threshold", type=float, default=None, help="Threshold for creating beams (higher means faster beam search, but smaller accuracy)")
parser.add_argument('--half', dest='half', action='store_true', help='Use half precision (fp16) during basecalling. On new graphics card, it might speed things up')
parser.add_argument('--batch-size', type=int, default=512, help='Batch size for calling, longer ones are usually faster, unless you get GPU OOM error')
parser.set_defaults(half=False)
args = parser.parse_args()
assert args.threads >= 1
files = args.reads if args.reads else []
if args.directory:
for directory_name in args.directory:
files += [os.path.join(directory_name, fn) for fn in os.listdir(directory_name)]
if len(files) == 0:
print("Zero input reads, nothing to do.")
sys.exit()
    # honour --weights when provided; otherwise fall back to the bundled weights
    weights = args.weights if args.weights else os.path.join(deepnano2.__path__[0], "weights", "weightsbig520.pt")
torch.set_grad_enabled(False)
model = Net()
model.load_state_dict(torch.load(weights))
model.eval()
model.cuda()
if args.half:
model.half()
model.share_memory()
qcaller = mp.Queue(10)
qfinalizer = mp.Queue()
call_proc = mp.Process(target=caller, args=(model, qcaller, qfinalizer))
final_proc = mp.Process(target=finalizer, args=(args.output, qfinalizer))
call_proc.start()
final_proc.start()
chunk_dtype = torch.float16 if args.half else torch.float32
start_time = datetime.datetime.now()
print("start", start_time)
chunks = []
read_marks = []
for fn in tqdm(files):
try:
with get_fast5_file(fn, mode="r") as f5:
for read in f5.get_reads():
read_id = read.get_read_id()
signal = rescale_signal(read.get_raw_data())
for i in range(0, len(signal), 3*step):
if i + 3*step + 6*pad > len(signal):
break
part = np.array(signal[i:i+3*step+6*pad])
if i == 0:
read_marks.append(read_id)
else:
read_marks.append(None)
chunks.append(np.vstack([part, part * part]).T)
if len(chunks) == args.batch_size:
qcaller.put((read_marks, torch.tensor(np.stack(chunks), dtype=chunk_dtype, device='cuda:0')))
chunks = []
read_marks = []
except OSError:
# TODO show something here
pass
if len(chunks) > 0:
qcaller.put((read_marks, torch.tensor(np.stack(chunks), dtype=chunk_dtype, device='cuda:0')))
qcaller.put(None)
final_proc.join()
qcaller.put(None)
call_proc.join()
print("fin", datetime.datetime.now() - start_time)
|
test_base_events.py
|
"""Tests for base_events.py"""
import errno
import logging
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import events
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
executor = mock.Mock()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
@mock.patch('asyncio.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution
# issues.
time.sleep(1.1)
return []
# logging needs debug flag
self.loop.set_debug(True)
# Log to INFO level if timeout > 1.0 sec.
self.loop._selector.select = slow_select
self.loop._process_events = mock.Mock()
self.loop._run_once()
self.assertEqual(logging.INFO, m_logger.log.call_args[0][0])
def fast_select(timeout):
time.sleep(0.001)
return []
self.loop._selector.select = fast_select
self.loop._run_once()
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
@asyncio.coroutine
def zero_error_coro():
yield from asyncio.sleep(0.01, loop=self.loop)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and so doesn't log
# a warning
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
# to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
@asyncio.coroutine
def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
        # See http://bugs.python.org/issue27136: fall back to getaddrinfo when
        # we can't recognize that an address is already resolved, e.g. a
        # Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = asyncio.Future(loop=self.loop)
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
        # if host is an empty string, use None instead
host = object()
@asyncio.coroutine
def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_address=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEADDR defaults to on for UNIX.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuse_address_default_on = (
os.name == 'posix' and sys.platform != 'cygwin')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuse_address_default_on:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
else:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True,
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False,
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_address=False,
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
yield from ()
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
        # BaseSelectorEventLoop() has no native sendfile implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, support.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(support.TESTFN, 'r') as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
if __name__ == '__main__':
unittest.main()
|
run.py
|
"""
2 THREADS:
single thread calc 245.961
single thread calc_release 242.391
2 thread calc 251.481
2 thread calc_release 130.035
4 THREADS:
single thread calc 253.464
single thread calc_release 247.712
4 thread calc 251.756
4 thread calc_release 103.160
single thread calc 242.183
single thread calc_release 242.092
4 thread calc 248.529
4 thread calc_release 98.867
"""
import time
import threading
import functools
from gil import calc, calc_release
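# The `gil` extension module is not included in this listing. As a rough,
# hypothetical sketch only (the real implementation may differ), it could be a
# Cython module in which `calc` runs a recursive Fibonacci computation while
# holding the GIL and `calc_release` runs the same computation inside a
# `nogil` block, which is what lets the multi-threaded `calc_release` runs in
# the benchmark results above scale. Note that fib(43) == 433494437, matching
# EXPECTED_RESULT below.
#
#     # gil.pyx (sketch; recursive Fibonacci and Cython `nogil` are assumptions)
#     cdef long long fib(long long n) nogil:
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     def calc(long long n):
#         return fib(n)              # GIL held for the whole call
#
#     def calc_release(long long n):
#         cdef long long r
#         with nogil:                # GIL released; threads can run in parallel
#             r = fib(n)
#         return r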
REPEAT = 100
INPUT_N = 43
EXPECTED_RESULT = 433494437
THREAD_NUMBER = 4
def measure(repeat, func, arg, expected):
start = time.time()
for _ in range(repeat):
result = func(arg)
if result != expected:
raise AssertionError('{} != {} by func {}'.format(
result, expected, func.__name__
))
assert result == expected
return time.time() - start
def run_single_thread():
print('single thread calc {:.3f}'.format(
measure(REPEAT, calc, INPUT_N, EXPECTED_RESULT)
))
print('single thread calc_release {:.3f}'.format(
measure(REPEAT, calc_release, INPUT_N, EXPECTED_RESULT)
))
def measure_multi_thread(func):
repeat = REPEAT / THREAD_NUMBER
repeat_int = int(repeat)
assert repeat_int == repeat
target = functools.partial(
measure, repeat_int, func, INPUT_N, EXPECTED_RESULT
)
threads = [threading.Thread(target=target) for _ in range(THREAD_NUMBER)]
start = time.time()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return time.time() - start
def run_multi_thread():
print('{:d} thread calc {:.3f}'.format(
THREAD_NUMBER, measure_multi_thread(calc))
)
print('{:d} thread calc_release {:.3f}'.format(
THREAD_NUMBER, measure_multi_thread(calc_release))
)
def main():
run_single_thread()
run_multi_thread()
if __name__ == '__main__':
main()
|
test_utils.py
|
"""Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
from test import support
if sys.platform == 'win32': # pragma: no cover
from .windows_utils import socketpair
else:
from socket import socketpair # pragma: no cover
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_SSLv23)
def run_briefly(loop):
@coroutine
def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
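# Typical use (sketch): schedule a callback, then run exactly one iteration.
#
#     loop.call_soon(callback)
#     run_once(loop)   # the callback has now run exactly once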
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = 2
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
if not os.path.isdir(here):
here = os.path.join(os.path.dirname(os.__file__),
'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
ssock = ssl.wrap_socket(request,
keyfile=keyfile,
certfile=certfile,
server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
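# Example usage (a minimal sketch; the 200 OK / b'Test message' response comes
# from the `app` callable defined in _run_test_server above):
#
#     with run_test_server(use_ssl=False) as httpd:
#         host, port = httpd.address
#         # Point a client at http://host:port/ and expect b'Test message'.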
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
    It manages its own time directly.
    If something is scheduled to be executed later, then on the next
    loop iteration, after all ready handlers are done, the generator
    passed to __init__ is called.
Generator should be like this:
def gen():
...
when = yield ...
... = yield time_advance
    The value returned by yield is the absolute time of the next scheduled handler.
    The value passed to yield is the time advance used to move the loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
assert fd in self.readers, 'fd {} is not registered'.format(fd)
handle = self.readers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args):
self._timers.append(when)
return super().call_at(when, callback, *args)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
def get_function_source(func):
source = events._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(loop.close)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
if not compat.PY34:
# Python 3.3 compatibility
def subTest(self, *args, **kwargs):
class EmptyCM:
def __enter__(self):
pass
def __exit__(self, *exc):
pass
return EmptyCM()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
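# Usage sketch (the loop and coroutine below are illustrative):
#
#   with disable_logger():
#       loop.run_until_complete(noisy_coro())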
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
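# Usage sketch: tests can hand this mock to transport code in place of a real
# socket and script its behaviour (the recv stub below is an assumption):
#
#   sock = mock_nonblocking_socket()
#   sock.recv.return_value = b'data'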
def force_legacy_ssl_support():
return mock.patch('asyncio.sslproto._is_sslproto_available',
return_value=False)
|
single_process_with_api.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
single_process_with_api [options]
Options:
-h, --help Show this page
--debug Show debug logging
--verbose Show verbose logging
-n=<i> n [default: 1000000]
--partition=<p> Partition
--key=<k> Key
--csv1=<c1> producer results csv [default: single-process-with-api-producer.csv]
--csv2=<c2> consumer results csv [default: single-process-with-api-consumer.csv]
"""
from docopt import docopt
import logging
import sys
import time
import fake_kafka
import asyncio
import csv
import threading
import fake_kafka.api
from uvicorn.config import Config
from uvicorn.main import Server
logger = logging.getLogger('single_process')
async def produce_messages(n, key, partition, csv1):
producer = fake_kafka.AIOKafkaProducer(bootstrap_servers=['http://127.0.0.1:8000'])
await producer.start()
if partition is not None:
partition = int(partition)
start = time.time()
for i in range(n):
await producer.send_and_wait("my_topic", "Super message", key=key, partition=partition)
end = time.time()
print("{} messages sent in {} s for {} ns/m or {} m/s".format(n, end - start, (end - start) * 1000000 / n, int(n / (end - start))))
await producer.stop()
with open(csv1, 'a') as f:
writer = csv.writer(f)
writer.writerow([n, end - start, (end - start) * 1000000 / n, int(n / (end - start))])
async def consume_messages(n, key, partition, csv2):
consumer = fake_kafka.AIOKafkaConsumer("my_topic", bootstrap_servers=['http://127.0.0.1:8000'], group_id='a')
await consumer.start()
start = time.time()
count = 0
try:
async for msg in consumer: # noqa
count += 1
if count >= n:
break
assert count == n, 'Did not receive expected number of messages'
end = time.time()
finally:
await consumer.stop()
print("{} messages recieved in {} s for {} ns/m or {} m/s".format(n, end - start, (end - start) * 1000000 / n, int(n / (end - start))))
with open(csv2, 'a') as f:
writer = csv.writer(f)
writer.writerow([n, end - start, (end - start) * 1000000 / n, int(n / (end - start))])
def start_server(limit_max_requests):
class CustomServer(Server):
def install_signal_handlers(self):
pass
config = Config(app=fake_kafka.api.app,
loop="asyncio",
limit_max_requests=limit_max_requests)
server = CustomServer(config=config)
thread = threading.Thread(target=server.run)
thread.start()
while not server.started:
time.sleep(0.01)
return thread
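# Sketch: the uvicorn server shuts itself down once limit_max_requests is
# reached, so a caller wanting a clean exit could keep the returned thread
# and join it (this pattern is an assumption, not used by main() below):
#
#   t = start_server(limit_max_requests=100)
#   ...                      # run the producer/consumer workload
#   t.join()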
def main(args=None):
if args is None:
args = sys.argv[1:]
parsed_args = docopt(__doc__, args)
if parsed_args['--debug']:
logging.basicConfig(level=logging.DEBUG)
elif parsed_args['--verbose']:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
start_server(int(parsed_args['-n']) * 2 + 2)
loop = asyncio.get_event_loop()
print('single_process n: {} key: {} partition: {}'.format(parsed_args['-n'], parsed_args['--key'], parsed_args['--partition']))
loop.run_until_complete(produce_messages(int(parsed_args['-n']), parsed_args['--key'], parsed_args['--partition'], parsed_args['--csv1']))
loop.run_until_complete(consume_messages(int(parsed_args['-n']), parsed_args['--key'], parsed_args['--partition'], parsed_args['--csv2']))
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
SQL_to_JSON.py
|
import sys
from subprocess import Popen, PIPE
from threading import Thread
from queue import Queue, Empty
mongoQuery = "db."
SQL_Logical_Operators = {"AND": "$and", "NOT": "$not", "OR": "$or"}
SQL_Comparison_Operators = {"=": "$eq", "!=": "$ne", "<>": "$ne", ">": "$gt", "<": "$lt", ">=": "$gte", "<=": "$lte", "!<": "$gt", "!>": "$lt", "NEQ": "$ne", "GEQ": "$gte", "LEQ": "$lte", "GTR": "$gt", "LSS": "$lt"}
SQL_String = ["char", "nchar", "varchar", "nvarchar"]
SQL_Int = ["int", "bigint", "decimal", "numeric", "number"]
def select_query():
global mongoQuery, tableName
def s_select():
after_select = sqlQuery[sqlQuery.index("SELECT") + 1]
if after_select == "*":
return ""
else:
s_select_ret = ""
after_select = sqlQuery[sqlQuery.index("SELECT") + 1:sqlQuery.index("FROM")]
for w in after_select:
if w[len(w)-1] == ",":
s_select_ret += w[:-1] + ": 1, "
else:
s_select_ret += w + ": 1"
return "{ " + s_select_ret + " }"
def s_from():
return sqlQuery[sqlQuery.index("FROM") + 1].lower()
def s_where():
s_where_ret = ""
try:
after_where = sqlQuery[sqlQuery.index("WHERE") + 1:]
logical_op = None
logical_op_flag = False
i = 0
while i != len(after_where):
key = after_where[i]
comp_op = after_where[i+1]
val = after_where[i+2]
i += 3
if len(after_where) > 3 and i < len(after_where):
logical_op_flag = True
logical_op = after_where[i]
i += 1
else:
logical_op = None
if logical_op:
if logical_op in SQL_Logical_Operators:
s_where_ret += " { " + SQL_Logical_Operators[logical_op] + ": [ "
else:
raise QueryError
if comp_op in SQL_Comparison_Operators:
s_where_ret += "{" + key + ": {" + SQL_Comparison_Operators[comp_op] + ": " + val + "} }"
if logical_op:
s_where_ret += " , "
else:
raise QueryError
if logical_op_flag:
s_where_ret += " ] } "
except ValueError:
return "{ }"
except:
s_where_ret = -1
return s_where_ret
tableName = s_from()
where_part = s_where()
select_part = s_select()
if where_part == -1:
raise QueryError
if select_part == "":
return mongoQuery + tableName + ".find(" + where_part + select_part + ")"
return mongoQuery + tableName + ".find(" + where_part + " , " + select_part + ")"
# .split("(", 1)[0]
def create_query():
global mongoQuery, tableName
tableName = sqlQuery[sqlQuery.index("TABLE") + 1].split("(", 1)[0]
return mongoQuery + "createCollection(\"" + tableName + "\")"
def delete_query():
def d_from():
return sqlQuery[sqlQuery.index("FROM") + 1].lower()
def d_where():
d_where_ret = ""
try:
after_where = sqlQuery[sqlQuery.index("WHERE") + 1:]
logical_op = None
logical_op_flag = False
i = 0
while i != len(after_where):
key = after_where[i]
comp_op = after_where[i + 1]
val = after_where[i + 2]
i += 3
if len(after_where) > 3 and i < len(after_where):
logical_op_flag = True
logical_op = after_where[i]
i += 1
else:
logical_op = None
if logical_op:
if logical_op in SQL_Logical_Operators:
d_where_ret += " { " + SQL_Logical_Operators[logical_op] + ": [ "
else:
raise QueryError
if comp_op in SQL_Comparison_Operators:
d_where_ret += "{" + key + ": {" + SQL_Comparison_Operators[comp_op] + ": " + val + "} }"
if logical_op:
d_where_ret += " , "
else:
raise QueryError
if logical_op_flag:
d_where_ret += " ] } "
except ValueError:
return "{ }"
except:
raise QueryError
return d_where_ret
tableName = d_from()
return mongoQuery + tableName + ".deleteMany( " + d_where() + " )"
def insert_query():
global sqlQuery, tableName
def i_into():
return sqlQuery[sqlQuery.index("INTO") + 1].lower().split("(", 1)[0]
def i_values():
global insert, sqlQuery, tableName
i_values_ret = ""
insert = "insertOne"
i_values_ret_flag = False
tableName = i_into()
col = ' '.join(sqlQuery)
col = col[col.index(tableName)+len(tableName)+1:]
col = col[:col.index(")")].split(",")
val = sqlQuery[sqlQuery.index("VALUES") + 1:]
val = ''.join(val).replace("(", "")
val = val.replace(")", "").split(",")
if len(val) > len(col):
insert = "insertMany"
i_values_ret += "[ "
i_values_ret_flag = True
d = dict()
for x in range(int(len(val)/len(col))): # 0 - 1
for i in range(len(col)): # 0 - 2
d[col[i]] = val[i+x*len(col)]
i_values_ret += str(d)
if i_values_ret_flag:
i_values_ret += " , "
if i_values_ret_flag:
i_values_ret = i_values_ret[:-3]
i_values_ret += " ]"
return i_values_ret.replace("'", ""), insert
tableName = i_into()
i_val_ret = i_values()
return mongoQuery + tableName + "." + i_val_ret[1] + "( " + i_val_ret[0] + " )"
functionSwitch = {"SELECT": select_query, # SELECT .... FROM .... WHERE
"CREATE": create_query, # CREATE TABLE .(NAME). ( comma seperated values )
"DELETE": delete_query, # DELETE FROM .(NAME). WHERE
"INSERT": insert_query, # INSERT INTO .(NAME). ( WHERE TO INSERT ? ) --> VALUES ( WHAT TO INSERT? )
"UPDATE": ()} # UPDATE .(NAME). SET (WHO TO SET and WHAT) WHERE ....
class QueryError(Exception):
pass
class NonBlockingStreamReader:
def __init__(self, stream=None):
# stream: the stream to read from.
# Usually a process' stdout or stderr.
self.is_running = False
self._s = stream
self._q = Queue()
self._t = None
@staticmethod
def _populate_queue(stream, queue):
# Collect lines from 'stream' and put them in 'queue'.
while True:
try:
line = stream.readline()
except ValueError:
break
if line:
queue.put(line)
else:
pass
def start(self):
self.is_running = True
self._t = Thread(target=self._populate_queue, args=(self._s, self._q))
self._t.daemon = True
self._t.start() # start collecting lines from the stream
def get_is_running(self):
return self.is_running
def set_stream(self, s):
self._s = s
def get_stream(self):
return self._s
def readline(self, timeout=None):
try:
return self._q.get(block=timeout is not None, timeout=timeout)
except Empty:
pass
class UnexpectedEndOfStream(Exception):
pass
def readNonBlocking():
global nonBlocking
str = ""
while True:
output = nonBlocking.readline(0.5)
        # 0.5 secs to let the shell output the result
if not output:
return str
str += output
sqlQuery = []
# sqlQuery = sys.argv[1:]
sqlQuery = "INSERT INTO people(user_id, age, status) VALUES ('bcd001', 45, 'A')".split()
if sqlQuery[0] in functionSwitch:
try:
mongoQuery = functionSwitch[sqlQuery[0]]()
except QueryError:
mongoQuery = "Unable to convert query file"
else:
mongoQuery = "Unable to convert query file"
# print(mongoQuery)
server = "mongodb://localhost:27017"
process = Popen(["mongo", server], stdin=PIPE, stdout=PIPE, shell=False, universal_newlines=True)
nonBlocking = NonBlockingStreamReader(stream=process.stdout)
nonBlocking.start()
readNonBlocking()
process.communicate(mongoQuery)[0]
answer = readNonBlocking()
print(answer, end='')
import sys
from subprocess import Popen, PIPE
from threading import Thread
from queue import Queue, Empty
mongoQuery = "db."
SQL_Logical_Operators = {"AND": "$and", "NOT": "$not", "OR": "$or"}
SQL_Comparison_Operators = {"=": "$eq", "!=": "$ne", "<>": "$ne", ">": "$gt", "<": "$lt", ">=": "$gte", "<=": "$lte", "!<": "$gt", "!>": "$lt", "NEQ": "$ne", "GEQ": "$gte", "LEQ": "$lte", "GTR": "$gt", "LSS": "$lt"}
SQL_String = ["char", "nchar", "varchar", "nvarchar"]
SQL_Int = ["int", "bigint", "decimal", "numeric", "number"]
def select_query():
global mongoQuery, tableName
def s_select():
after_select = sqlQuery[sqlQuery.index("SELECT") + 1]
if after_select == "*":
return ""
else:
s_select_ret = ""
after_select = sqlQuery[sqlQuery.index("SELECT") + 1:sqlQuery.index("FROM")]
for w in after_select:
if w[len(w)-1] == ",":
s_select_ret += w[:-1] + ": 1, "
else:
s_select_ret += w + ": 1"
return "{ " + s_select_ret + " }"
def s_from():
return sqlQuery[sqlQuery.index("FROM") + 1].lower()
def s_where():
s_where_ret = ""
try:
after_where = sqlQuery[sqlQuery.index("WHERE") + 1:]
logical_op = None
logical_op_flag = False
i = 0
while i != len(after_where):
key = after_where[i]
comp_op = after_where[i+1]
val = after_where[i+2]
i += 3
if len(after_where) > 3 and i < len(after_where):
logical_op_flag = True
logical_op = after_where[i]
i += 1
else:
logical_op = None
if logical_op:
if logical_op in SQL_Logical_Operators:
s_where_ret += " { " + SQL_Logical_Operators[logical_op] + ": [ "
else:
raise QueryError
if comp_op in SQL_Comparison_Operators:
s_where_ret += "{" + key + ": {" + SQL_Comparison_Operators[comp_op] + ": " + val + "} }"
if logical_op:
s_where_ret += " , "
else:
raise QueryError
if logical_op_flag:
s_where_ret += " ] } "
except ValueError:
return "{ }"
except:
s_where_ret = -1
return s_where_ret
tableName = s_from()
where_part = s_where()
select_part = s_select()
if where_part == -1:
raise QueryError
if select_part == "":
return mongoQuery + tableName + ".find(" + where_part + select_part + ")"
return mongoQuery + tableName + ".find(" + where_part + " , " + select_part + ")"
# .split("(", 1)[0]
def create_query():
global mongoQuery, tableName
tableName = sqlQuery[sqlQuery.index("TABLE") + 1].split("(", 1)[0]
return mongoQuery + "createCollection(\"" + tableName + "\")"
def delete_query():
def d_from():
return sqlQuery[sqlQuery.index("FROM") + 1].lower()
def d_where():
d_where_ret = ""
try:
after_where = sqlQuery[sqlQuery.index("WHERE") + 1:]
logical_op = None
logical_op_flag = False
i = 0
while i != len(after_where):
key = after_where[i]
comp_op = after_where[i + 1]
val = after_where[i + 2]
i += 3
if len(after_where) > 3 and i < len(after_where):
logical_op_flag = True
logical_op = after_where[i]
i += 1
else:
logical_op = None
if logical_op:
if logical_op in SQL_Logical_Operators:
d_where_ret += " { " + SQL_Logical_Operators[logical_op] + ": [ "
else:
raise QueryError
if comp_op in SQL_Comparison_Operators:
d_where_ret += "{" + key + ": {" + SQL_Comparison_Operators[comp_op] + ": " + val + "} }"
if logical_op:
d_where_ret += " , "
else:
raise QueryError
if logical_op_flag:
d_where_ret += " ] } "
except ValueError:
return "{ }"
except:
raise QueryError
return d_where_ret
tableName = d_from()
return mongoQuery + tableName + ".deleteMany( " + d_where() + " )"
def insert_query():
global sqlQuery, tableName
def i_into():
return sqlQuery[sqlQuery.index("INTO") + 1].lower().split("(", 1)[0]
def i_values():
global insert, sqlQuery, tableName
i_values_ret = ""
insert = "insertOne"
i_values_ret_flag = False
tableName = i_into()
col = ' '.join(sqlQuery)
col = col[col.index(tableName)+len(tableName)+1:]
col = col[:col.index(")")].split(",")
val = sqlQuery[sqlQuery.index("VALUES") + 1:]
val = ''.join(val).replace("(", "")
val = val.replace(")", "").split(",")
if len(val) > len(col):
insert = "insertMany"
i_values_ret += "[ "
i_values_ret_flag = True
d = dict()
for x in range(int(len(val)/len(col))): # 0 - 1
for i in range(len(col)): # 0 - 2
d[col[i]] = val[i+x*len(col)]
i_values_ret += str(d)
if i_values_ret_flag:
i_values_ret += " , "
if i_values_ret_flag:
i_values_ret = i_values_ret[:-3]
i_values_ret += " ]"
return i_values_ret.replace("'", ""), insert
tableName = i_into()
i_val_ret = i_values()
return mongoQuery + tableName + "." + i_val_ret[1] + "( " + i_val_ret[0] + " )"
functionSwitch = {"SELECT": select_query, # SELECT .... FROM .... WHERE
"CREATE": create_query, # CREATE TABLE .(NAME). ( comma seperated values )
"DELETE": delete_query, # DELETE FROM .(NAME). WHERE
"INSERT": insert_query, # INSERT INTO .(NAME). ( WHERE TO INSERT ? ) --> VALUES ( WHAT TO INSERT? )
"UPDATE": ()} # UPDATE .(NAME). SET (WHO TO SET and WHAT) WHERE ....
class QueryError(Exception):
pass
class NonBlockingStreamReader:
def __init__(self, stream=None):
# stream: the stream to read from.
# Usually a process' stdout or stderr.
self.is_running = False
self._s = stream
self._q = Queue()
self._t = None
@staticmethod
def _populate_queue(stream, queue):
# Collect lines from 'stream' and put them in 'queue'.
while True:
try:
line = stream.readline()
except ValueError:
break
if line:
queue.put(line)
else:
pass
def start(self):
self.is_running = True
self._t = Thread(target=self._populate_queue, args=(self._s, self._q))
self._t.daemon = True
self._t.start() # start collecting lines from the stream
def get_is_running(self):
return self.is_running
def set_stream(self, s):
self._s = s
def get_stream(self):
return self._s
def readline(self, timeout=None):
try:
return self._q.get(block=timeout is not None, timeout=timeout)
except Empty:
pass
class UnexpectedEndOfStream(Exception):
pass
def readNonBlocking():
global nonBlocking
str = ""
while True:
output = nonBlocking.readline(5)
        # 5 secs to let the shell output the result
if not output:
return str
str += output
sqlQuery = []
sqlQuery = sys.argv[1:]
# sqlQuery = "SELECT * FROM sofer".split()
if sqlQuery[0] in functionSwitch:
try:
mongoQuery = functionSwitch[sqlQuery[0]]()
except QueryError:
mongoQuery = "Unable to convert query file"
else:
mongoQuery = "Unable to convert query file"
# print(mongoQuery)
server = "mongodb://localhost:27017"
process = Popen(["mongo", server], stdin=PIPE, stdout=PIPE, shell=False, universal_newlines=True)
# nonBlocking = NonBlockingStreamReader(stream=process.stdout)
# nonBlocking.start()
# readNonBlocking()
answer = process.communicate(mongoQuery)[0]
index = answer.find('{')
print(index)
final = answer[index + 1:-4]
index = final.find('{')
# answer = readNonBlocking()
print(mongoQuery + "`" + final[index:].replace("\n","@"), end='')
|
fetch_parallel.py
|
import urllib.request
import threading
import queue
def read_url(url, queue):
try:
data = urllib.request.urlopen(url, None, 15).read()
print(f"Fetched {len(data)} from {url}")
queue.put(data)
except Exception as e:
print(e)
def fetch_parallel(urls):
result = queue.Queue()
threads = [threading.Thread(target=read_url, args=(url, result)) for url in urls]
for t in threads:
t.start()
for t in threads:
t.join()
return result
urls = (
"https://github.com/djeada/Od-C-do-Cpp#L-warto%C5%9Bci-i-r-warto%C5%9Bci",
"https://github.com/djeada/Od-C-do-Cpp#L-warto%C5%9Bci-i-r-warto%C5%9Bci",
"https://github.com/djeada/Od-C-do-Cpp#L-warto%C5%9Bci-i-r-warto%C5%9Bci",
)
fetched_sites = fetch_parallel(urls)
result = list()
while not fetched_sites.empty():
result.append(fetched_sites.get_nowait())
|
test_toggle_fullscreen.py
|
import pytest
import threading
from .util import run_test, destroy_window
def toggle_fullscreen():
import webview
def _toggle_fullscreen(webview):
webview.toggle_fullscreen()
t = threading.Thread(target=_toggle_fullscreen, args=(webview,))
t.start()
destroy_window(webview)
webview.create_window('Toggle fullscreen test', 'https://www.example.org')
def test_toggle_fullscreen():
run_test(toggle_fullscreen)
|
net.py
|
"""
Handles P2P connections.
All networking functions are ultimately done through
this class.
"""
import hashlib
import signal
import zlib
from ast import literal_eval
from .nat_pmp import NatPMP
from .rendezvous_client import *
from .unl import UNL
from .upnp import *
# How many times a single message can be retransmitted.
max_retransmissions = 1
# Minimum time that must pass between retransmissions.
min_retransmit_interval = 5
# A theoretical time for a message to propagate across the network.
propagation_delay = 5
# A table of message hashes for received messages.
seen_messages = {}
# How often to get new DHT messages.
dht_msg_interval = 5
# How often to re-bootstrap.
rendezvous_interval = 30 * 60
# How often to re-advertise node.
# Update the bootstrapping server every 12 hours.
advertise_interval = 60 * 60 * 12
# Time that must elapse between accepting simultaneous opens.
sim_open_interval = 2
# Bootstrapping + TCP hole punching server.
rendezvous_servers = [
{
"addr": "157.245.173.223",
# "addr": "162.243.213.95",
"port": 8000
}
]
# Web server running script to test port forwarding.
# And get WAN IP address.
forwarding_servers = [
# {
# "addr": "185.86.149.128",
# "port": 80,
# "url": "/net.php"
# },
{
"addr": "157.245.173.223",
# "addr": "185.61.148.22",
"port": 80,
"url": "/net.php"
}
]
# Debug logging.
logging.basicConfig()
log = logging.getLogger(__name__)
def is_msg_old(msg, record_seen=0):
if type(msg) == str:
msg = msg.encode("ascii")
response_hash = hashlib.sha256(msg).hexdigest()
if response_hash in seen_messages:
seen = seen_messages[response_hash]
elapsed = int(time.time()) - seen["last"]
if elapsed < min_retransmit_interval:
return 1
if seen["times"] >= max_retransmissions:
return 1
if record_seen:
record_msg_hash(msg)
return 0
def record_msg_hash(msg):
if type(msg) == str:
msg = msg.encode("ascii")
response_hash = hashlib.sha256(msg).hexdigest()
if not is_msg_old(msg):
timestamp = int(time.time())
if response_hash in seen_messages:
seen = seen_messages[response_hash]
seen["times"] += 1
seen["last"] = timestamp
else:
seen_messages[response_hash] = {
"times": 1,
"last": timestamp
}
return 1
else:
return 0
def clear_seen_messages():
global seen_messages
seen_messages = {}
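# Sketch of how the helpers above combine (the 'handle' call is illustrative):
# a message is recorded the first time it is seen, and an identical message
# arriving again within min_retransmit_interval, or after max_retransmissions,
# is reported as old.
#
#   if not is_msg_old(msg, record_seen=1):
#       handle(msg)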
class Net:
def __init__(self, net_type="p2p", nat_type="unknown", node_type="unknown",
max_outbound=10, max_inbound=10, passive_bind="0.0.0.0",
passive_port=50500, interface="default", wan_ip=None,
dht_node=None, error_log_path="error.log", debug=0,
sys_clock=None, servers=None):
# List of outbound connections (from us, to another node.)
self.outbound = []
# List of inbound connections (to us, from another node.)
self.inbound = []
# Socket to receive inbound connections on.
self.passive = None
# Type of node: simultaneous, active, passive.
self.node_type = node_type
# NAT type: preserving, delta, reuse, random.
self.nat_type = nat_type
# Address to listen() on for inbound cons.
self.passive_bind = passive_bind
# Port to listen() on for inbound cons.
self.passive_port = int(passive_port)
# How many connections can we accept from other nodes?
self.max_outbound = int(max_outbound)
# How many connections can we make to other nodes?
self.max_inbound = int(max_inbound)
# List of servers to do port forwarding checks.
self.forwarding_servers = forwarding_servers
# Unix timestamp of last bootstrap.
self.last_bootstrap = None
# Unix timestamp of last DHT direct message.
self.last_dht_msg = None
# Unix timestamp of last advertise.
self.last_advertise = None
# What interface to make outbound connections from?
self.interface = interface
# Skip advertise if we have at least this many inbound connections.
self.min_connected = 3
# Unix timestamp of last simultaneous open challenge.
self.last_passive_sim_open = 0
# Does this Net instance need to bootstrap?
self.enable_bootstrap = 1
# Does this Net instance need to advertise?
self.enable_advertise = 1
# Should we try open ports?
self.enable_forwarding = 1
# Is simultaneous open enabled?
self.enable_simultaneous = 1
# Does this Net instance reject duplicate messages
# (same hash as previous messages)?
self.enable_duplicates = 1
# Where should I store errors?
self.error_log_path = error_log_path
# Indicates port forwarding state.
self.forwarding_type = "manual"
# Debug mode shows debug messages.
self.debug = debug
# Network: p2p or direct.
self.net_type = net_type
# Calculate clock skew from NTP.
self.sys_clock = sys_clock
# List of rendezvous servers.
self.rendezvous_servers = servers or rendezvous_servers
        # Rendezvous / bootstrapping client.
self.rendezvous = RendezvousClient(
self.nat_type, rendezvous_servers=self.rendezvous_servers,
interface=self.interface,
sys_clock=self.sys_clock
)
# DHT node for receiving direct messages from other nodes.
self.dht_node = dht_node
# DHT messages received from DHT.
self.dht_messages = []
# Subscribes to certain messages from DHT.
# Todo: move status messages to file transfer client
def build_dht_msg_handler():
def dht_msg_handler(node, msg):
self.debug_print("DHT msg handler in Net")
valid_needles = [
'^REVERSE_CONNECT',
'^REVERSE_QUERY',
'^REVERSE_ORIGIN',
"""u?("|')status("|')(:|,)\s+u?("|')SYN("|')""",
"""u?("|')status("|')(:|,)\s+u?("|')SYN-ACK("|')""",
"""u?("|')status("|')(:|,)\s+u?("|')ACK("|')""",
"""u?("|')status("|')(:|,)\s+u?("|')RST("|')""",
]
# Convert zlib packed binary to Python object.
self.debug_print("In net dht" + str(type(msg)))
if type(msg) == type(b""):
try:
msg = literal_eval(zlib.decompress(msg))
except:
pass
# Encode result to unicode for RE checks.
"""
If buffer errors result: enable this.
try:
if sys.version_info >= (3, 0, 0):
if type(msg) == bytes:
msg = msg.decode("utf-8")
else:
if type(msg) == str:
msg = unicode(msg)
except:
return
"""
# Check for matches.
for needle in valid_needles:
if re.search(needle, str(msg)) is not None:
msg = {
u"message": msg,
u"source": None
}
self.dht_messages.append(msg)
return
return dht_msg_handler
# Add message handler to DHT for our messages.
self.dht_msg_handler = build_dht_msg_handler()
if self.dht_node is not None:
self.dht_node.add_message_handler(self.dht_msg_handler)
# External IP of this node.
self.wan_ip = wan_ip or get_wan_ip()
# Node type details only known after network is start()ed.
self.unl = None
# List of connections that still need to respond to
# our reverse query.
self.pending_reverse_queries = []
# Time frame for connection to respond to reverse query.
self.reverse_query_expiry = 60
# Enable more than one connection to the same IP.
self.enable_duplicate_ip_cons = 0
# Net instances hide their con details to prioritise direct cons.
if self.net_type == "direct":
self.disable_bootstrap()
self.enable_duplicate_ip_cons = 1
# Set to 1 when self.start() has been called.
self.is_net_started = 0
# Start synchronize thread.
# t = Thread(target=self.synchronize_loop)
# t.setDaemon(True)
# t.start()
def synchronize_loop(self):
while 1:
if self.is_net_started:
self.synchronize()
time.sleep(5)
def debug_print(self, msg):
log.debug(str(msg))
def disable_duplicates(self):
self.enable_duplicates = 0
def disable_bootstrap(self):
self.enable_bootstrap = 0
def disable_advertise(self):
self.enable_advertise = 0
def disable_simultaneous(self):
self.enable_simultaneous = 0
def disable_forwarding(self):
self.enable_forwarding = 0
def get_connection_no(self):
return len(self.outbound) + len(self.inbound)
# Used to reject duplicate connections.
def validate_node(self, node_ip, node_port=None, same_nodes=1):
self.debug_print("Validating: " + node_ip)
# Is this a valid IP?
if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
self.debug_print("Invalid node ip in validate node")
return 0
# Is this a valid port?
if node_port != 0 and node_port is not None:
if not is_valid_port(node_port):
self.debug_print("Invalid node port in validate port")
return 0
"""
Don't accept connections from self to passive server
or connections to already connected nodes.
"""
if not self.enable_duplicate_ip_cons:
# Don't connect to ourself.
if (node_ip == "127.0.0.1" or
node_ip == get_lan_ip(self.interface) or
node_ip == self.wan_ip):
self.debug_print("Cannot connect to ourself.")
return 0
# No, really: don't connect to ourself.
if node_ip == self.passive_bind and node_port == self.passive_port:
self.debug_print("Error connecting to same listen server.")
return 0
# Don't connect to same nodes.
if same_nodes:
for node in self.outbound + self.inbound:
try:
addr, port = node["con"].s.getpeername()
if node_ip == addr:
self.debug_print("Already connected to this node.")
return 0
except Exception as e:
print(e)
return 0
return 1
# Make an outbound con to a passive or simultaneous node.
def add_node(self, node_ip, node_port, node_type, timeout=5):
# Correct type for port.
node_port = int(node_port)
# Debug info.
msg = "Attempting to connect to %s:%s:%s" % (
node_ip, str(node_port), node_type
)
self.debug_print(msg)
# Already connected to them.
con = None
try:
if not self.enable_duplicate_ip_cons:
for node in self.outbound + self.inbound:
if node_ip == node["ip"]:
self.debug_print("Already connected.")
con = node["con"]
return con
# Avoid connecting to ourself.
if not self.validate_node(node_ip, node_port):
self.debug_print("Validate node failed.")
return None
# Make a simultaneous open connection.
if node_type == "simultaneous" and self.enable_simultaneous:
# Check they've started net first
# If they haven't we won't know the NAT details / node type.
if not self.is_net_started:
raise Exception("Make sure to start net before you add"
" node.")
if self.nat_type in self.rendezvous.predictable_nats:
# Attempt to make active simultaneous connection.
old_timeout = self.rendezvous.timeout
try:
self.rendezvous.timeout = timeout
self.debug_print("Attempting simultaneous challenge")
con = self.rendezvous.simultaneous_challenge(
node_ip, node_port, "TCP"
)
except Exception as e:
self.debug_print("sim challenge failed")
error = parse_exception(e)
self.debug_print(error)
log_exception(self.error_log_path, error)
return None
self.rendezvous.timeout = old_timeout
# Record node details and return con.
self.rendezvous.simultaneous_cons = []
if con is not None:
node = {
"con": con,
"type": "simultaneous",
"ip": node_ip,
"port": 0
}
self.outbound.append(node)
self.debug_print("SUCCESS")
else:
self.debug_print("FAILURE")
# Passive outbound -- easiest to connect to.
if node_type == "passive":
try:
# Try connect to passive server.
con = Sock(node_ip, node_port, blocking=0,
timeout=timeout, interface=self.interface)
node = {
"con": con,
"type": "passive",
"ip": node_ip,
"port": node_port
}
self.outbound.append(node)
self.debug_print("SUCCESS")
except Exception as e:
self.debug_print("FAILURE")
error = parse_exception(e)
self.debug_print(error)
log_exception(self.error_log_path, error)
return None
# Return new connection.
return con
finally:
# Remove undesirable messages from replies.
# Save message: 0 = no, 1 = yes.
def filter_msg_check_builder():
def filter_msg_check(msg):
# Allow duplicate replies?
record_seen = not self.enable_duplicates
# Check if message is old.
return not is_msg_old(msg, record_seen)
return filter_msg_check
# Patch sock object to reject duplicate replies
# If it's enabled.
if con is not None:
con.reply_filter = filter_msg_check_builder()
def bootstrap(self):
"""
When the software is first started, it needs to retrieve
a list of nodes to connect to the network to. This function
asks the server for N nodes which consists of at least N
passive nodes and N simultaneous nodes. The simultaneous
nodes are prioritized if the node_type for the machine
running this software is simultaneous, with passive nodes
being used as a fallback. Otherwise, the node exclusively
uses passive nodes to bootstrap.
        This algorithm is designed to preserve passive nodes'
inbound connection slots.
"""
# Disable bootstrap.
if not self.enable_bootstrap:
return None
        # Avoid hammering the rendezvous server.
t = time.time()
if self.last_bootstrap is not None:
if t - self.last_bootstrap <= rendezvous_interval:
self.debug_print("Bootstrapped recently")
return None
self.last_bootstrap = t
self.debug_print("Searching for nodes to connect to.")
try:
connection_slots = self.max_outbound - (len(self.outbound))
if connection_slots > 0:
# Connect to rendezvous server.
rendezvous_con = self.rendezvous.server_connect()
# Retrieve random nodes to bootstrap with.
rendezvous_con.send_line("BOOTSTRAP " +
str(self.max_outbound * 2))
choices = rendezvous_con.recv_line(timeout=2)
if choices == "NODES EMPTY":
rendezvous_con.close()
self.debug_print("Node list is empty.")
return self
else:
self.debug_print("Found node list.")
# Parse node list.
choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
rendezvous_con.s.close()
# Attempt to make active simultaneous connections.
passive_nodes = []
for node in choices:
# Out of connection slots.
if not connection_slots:
break
# Add to list of passive nodes.
node_type, node_ip, node_port = node
self.debug_print(str(node))
if node_type == "p":
passive_nodes.append(node)
# Use passive to make up the remaining cons.
i = 0
while i < len(passive_nodes) and connection_slots > 0:
node_type, node_ip, node_port = passive_nodes[i]
con = self.add_node(node_ip, node_port, "passive")
if con is not None:
connection_slots -= 1
self.debug_print("Con successful.")
else:
self.debug_print("Con failed.")
i += 1
except Exception as e:
self.debug_print("Unknown error in bootstrap()")
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self
def advertise(self):
"""
This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p
"""
# Advertise is disabled.
if not self.enable_advertise:
self.debug_print("Advertise is disbled!")
return None
# Direct net server is reserved for direct connections only.
if self.net_type == "direct" and self.node_type == "passive":
return None
# Net isn't started!.
if not self.is_net_started:
raise Exception("Please call start() before you call advertise()")
        # Avoid hammering the rendezvous server with excessive requests.
t = time.time()
if self.last_advertise is not None:
if t - self.last_advertise <= advertise_interval:
return None
if len(self.inbound) >= self.min_connected:
return None
self.last_advertise = t
# Tell rendezvous server to list us.
try:
# We're a passive node.
if self.node_type == "passive" and\
self.passive_port is not None and\
self.enable_advertise:
self.rendezvous.passive_listen(self.passive_port,
self.max_inbound)
"""
Simultaneous open is only used as a fail-safe for connections to
nodes on the direct_net and only direct_net can list itself as
simultaneous so its safe to leave this enabled.
"""
if self.node_type == "simultaneous":
self.rendezvous.simultaneous_listen()
except Exception as e:
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self
def determine_node(self):
"""
Determines the type of node based on a combination of forwarding
reachability and NAT type.
"""
# Manually set node_type as simultaneous.
if self.node_type == "simultaneous":
if self.nat_type != "unknown":
return "simultaneous"
# Get IP of binding interface.
unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
if self.passive_bind in unspecific_bind:
lan_ip = get_lan_ip(self.interface)
else:
lan_ip = self.passive_bind
# Passive node checks.
if lan_ip is not None \
and self.passive_port is not None and self.enable_forwarding:
self.debug_print("Checking if port is forwarded.")
# Check port isn't already forwarded.
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
msg = "Port already forwarded. Skipping NAT traversal."
self.debug_print(msg)
self.forwarding_type = "forwarded"
return "passive"
else:
self.debug_print("Port is not already forwarded.")
# Most routers.
try:
self.debug_print("Trying UPnP")
UPnP(self.interface).forward_port("TCP", self.passive_port,
lan_ip)
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
self.forwarding_type = "UPnP"
self.debug_print("Forwarded port with UPnP.")
else:
self.debug_print("UPnP failed to forward port.")
except Exception as e:
# Log exception.
error = parse_exception(e)
log_exception(self.error_log_path, error)
self.debug_print("UPnP failed to forward port.")
# Apple devices.
try:
self.debug_print("Trying NATPMP.")
NatPMP(self.interface).forward_port("TCP",
self.passive_port,
lan_ip)
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
self.forwarding_type = "NATPMP"
self.debug_print("Port forwarded with NATPMP.")
else:
self.debug_print("Failed to forward port with NATPMP.")
self.debug_print("Falling back on TCP hole punching or"
" proxying.")
except Exception as e:
# Log exception
error = parse_exception(e)
log_exception(self.error_log_path, error)
self.debug_print("Failed to forward port with NATPMP.")
# Check it worked.
if self.forwarding_type != "manual":
return "passive"
# Fail-safe node types.
if self.nat_type != "unknown":
return "simultaneous"
else:
return "active"
# Receive inbound connections.
def start_passive_server(self):
self.passive = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.passive.bind((self.passive_bind, self.passive_port))
self.passive.listen(self.max_inbound)
# Check bound local port.
if not self.passive_port:
self.passive_port = self.passive.getsockname()[1]
def start(self):
"""
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
"""
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
        # Register a ctrl + c handler.
signal.signal(signal.SIGINT, self.stop)
# Save WAN IP.
self.debug_print("WAN IP = " + str(self.wan_ip))
# Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close()
except:
raise Exception("Unable to connect to rendezvous server.")
# Started no matter what
# since LAN connections are always possible.
self.start_passive_server()
# Determine NAT type.
if self.nat_type == "unknown":
self.debug_print("Determining NAT type.")
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != "unknown":
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print("NAT type = " + nat_type)
else:
self.debug_print("Unable to determine NAT type.")
# Check NAT type if node is simultaneous
# is manually specified.
if self.node_type == "simultaneous":
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print("Manual setting of simultanous specified but"
" ignored since NAT does not support it.")
self.node_type = "active"
else:
# Determine node type.
self.debug_print("Determining node type.")
# No checks for manually specifying passive
# (there probably should be.)
if self.node_type == "unknown":
self.node_type = self.determine_node()
# Prevent P2P nodes from running as simultaneous.
if self.net_type == "p2p":
"""
            TCP hole punching is reserved specifically for direct networks
            (a net object reserved for receiving direct connections);
            p2p is for connecting to the main network. The reason for this
            is that you can't do multiple TCP hole punches at the same time,
            so it's reserved for the direct network where it's most needed.
"""
if self.node_type == "simultaneous":
self.debug_print("Simultaneous is not allowed for P2P")
self.node_type = "active"
self.disable_simultaneous()
self.debug_print("Node type = " + self.node_type)
# Close stray cons from determine_node() tests.
self.close_cons()
# Set net started status.
self.is_net_started = 1
# Initialise our UNL details.
self.unl = UNL(
net=self,
dht_node=self.dht_node,
wan_ip=self.wan_ip
)
# Nestled calls.
return self
def stop(self, signum=None, frame=None):
self.debug_print("Stopping networking.")
if self.passive is not None:
try:
self.passive.shutdown(1)
except:
pass
self.passive.close()
self.passive = None
if self.last_advertise is not None:
self.rendezvous.leave_fight()
"""
Just let the threads timeout by themselves.
Otherwise mutex deadlocks could occur.
for unl_thread in self.unl.unl_threads:
unl_thread.exit()
"""
for con in self:
con.close()
if signum is not None:
raise Exception("Process was interrupted.")
# Return a connection that matches a remote UNL.
def con_by_unl(self, unl, cons=None):
if cons is None:
cons = self.outbound + self.inbound
for con in cons:
if not isinstance(con, Sock):
con = con["con"]
if con.unl is not None:
self.debug_print("CMP")
self.debug_print(unl)
self.debug_print(con.unl)
if unl == con.unl:
# Connection not ready.
if con.nonce is None and self.net_type == "direct":
continue
return con
else:
self.debug_print("\a")
self.debug_print("Con UNL is None (in con by unl)")
self.debug_print(cons)
return None
# Return a connection by its IP.
def con_by_ip(self, ip):
for node in self.outbound + self.inbound:
# Used to block UNLs until nonces are received.
# Otherwise they might try do I/O and ruin their protocols.
if self.net_type == "direct":
if node["con"].nonce is None and self.net_type == "direct":
continue
if node["ip"] == ip:
return node["con"]
return None
def generate_con_id(self, nonce, their_wan_ip, our_wan_ip):
# Convert WAN IPs to bytes.
if sys.version_info >= (3, 0, 0):
if type(their_wan_ip) == str:
their_wan_ip = their_wan_ip.encode("ascii")
if type(our_wan_ip) == str:
our_wan_ip = our_wan_ip.encode("ascii")
else:
if type(their_wan_ip) == unicode:
their_wan_ip = str(their_wan_ip)
            if type(our_wan_ip) == unicode:
our_wan_ip = str(our_wan_ip)
# Hash WAN IPs to make them the same length.
their_wan_ip = hashlib.sha256(their_wan_ip).hexdigest().encode("ascii")
our_wan_ip = hashlib.sha256(our_wan_ip).hexdigest().encode("ascii")
# Derive fingerprint.
int_their_wan_ip = int(their_wan_ip, 16)
int_our_wan_ip = int(our_wan_ip, 16)
if int_our_wan_ip > int_their_wan_ip:
fingerprint = hashlib.sha256(our_wan_ip + their_wan_ip)
else:
# If both are the same the order doesn't matter.
fingerprint = hashlib.sha256(their_wan_ip + our_wan_ip)
fingerprint = fingerprint.hexdigest().encode("ascii")
# Convert nonce to bytes.
if sys.version_info >= (3, 0, 0):
if type(nonce) == str:
nonce = nonce.encode("ascii")
else:
if type(nonce) == unicode:
nonce = str(nonce)
# Generate con ID.
con_id = hashlib.sha256(nonce + fingerprint).hexdigest()
# Convert to unicode.
if sys.version_info >= (3, 0, 0):
if type(con_id) == bytes:
con_id = con_id.decode("utf-8")
else:
if type(con_id) == str:
con_id = unicode(con_id)
# Return con ID.
return con_id
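    # Example (sketch): both peers derive the same ID for a connection because
    # the fingerprint ordering depends only on the hashed IPs, not on which
    # side is "ours" ('net' is an already-constructed instance):
    #
    #   a = net.generate_con_id("00" * 32, "1.2.3.4", "5.6.7.8")
    #   b = net.generate_con_id("00" * 32, "5.6.7.8", "1.2.3.4")
    #   assert a == b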
def con_by_id(self, expected_id):
for node in self.outbound + self.inbound:
# Nothing to test.
if node["con"].nonce is None and self.net_type == "direct":
self.debug_print("Nonce not set")
continue
# Generate con_id from con.
try:
their_wan_ip, junk = node["con"].s.getpeername()
except:
continue
if is_ip_private(their_wan_ip):
our_wan_ip = get_lan_ip(self.interface)
else:
our_wan_ip = self.wan_ip
found_id = self.generate_con_id(
node["con"].nonce,
their_wan_ip,
our_wan_ip
)
# Check result.
if found_id == expected_id:
return node["con"]
return None
# Send a message to all currently established connections.
def broadcast(self, msg, source_con=None):
for node in self.outbound + self.inbound:
if node["con"] != source_con:
node["con"].send_line(msg)
def close_cons(self):
# Close all connections.
for node in self.inbound + self.outbound:
node["con"].close()
# Flush client queue for passive server.
if self.node_type == "passive" and self.passive is not None:
self.passive.close()
self.start_passive_server()
# Start from scratch.
self.inbound = []
self.outbound = []
def synchronize(self):
# Clean up dead connections.
for node_list_name in ["self.inbound", "self.outbound"]:
node_list = eval(node_list_name)[:]
for node in node_list:
if not node["con"].connected:
self.debug_print("\a")
self.debug_print("Removing disconnected: " + str(node))
eval(node_list_name).remove(node)
# Timeout connections that haven't responded to reverse query.
old_reverse_queries = []
for reverse_query in self.pending_reverse_queries:
duration = time.time() - reverse_query["timestamp"]
if duration >= self.reverse_query_expiry:
reverse_query["con"].close()
old_reverse_queries.append(reverse_query)
# Remove old reverse queries.
for reverse_query in old_reverse_queries:
self.pending_reverse_queries.remove(reverse_query)
# Get connection nonce (for building IDs.)
if self.net_type == "direct":
for node in self.inbound + self.outbound:
if node["con"].nonce is not None:
continue
# Receive nonce part.
if len(node["con"].nonce_buf) < 64:
assert(node["con"].blocking != 1)
remaining = 64 - len(node["con"].nonce_buf)
nonce_part = node["con"].recv(remaining)
if len(nonce_part):
node["con"].nonce_buf += nonce_part
# Set nonce.
if len(node["con"].nonce_buf) == 64:
node["con"].nonce = node["con"].nonce_buf
# Check for reverse connect requests.
if self.dht_node is not None and self.net_type == "direct":
# Don't do this every synch cycle.
t = time.time()
skip_dht_check = 0
if self.last_dht_msg is not None:
if t - self.last_dht_msg > dht_msg_interval:
skip_dht_check = 1
if not skip_dht_check and len(self.dht_messages):
processed = []
for dht_response in self.dht_messages:
# Found reverse connect request.
msg = str(dht_response["message"])
if re.match("^REVERSE_CONNECT:[a-zA-Z0-9+/-=_\s]+:[a-fA-F0-9]{64}$", msg) is not None:
# Process message.
self.debug_print(str(msg))
call, their_unl, nonce = msg.split(":")
their_unl = UNL(value=their_unl).deconstruct()
our_unl = UNL(value=self.unl.value).deconstruct()
node_id = their_unl["node_id"]
# Are we already connected.
is_connected = False
if nonce == "0" * 64:
# Use LAN IPs.
their_ip = their_unl["wan_ip"]
our_ip = our_unl["wan_ip"]
if their_ip == our_ip:
their_ip = their_unl["lan_ip"]
our_ip = our_unl["lan_ip"]
# Get con ID.
con_id = self.generate_con_id(
nonce,
their_ip,
our_ip
)
# Find con if it exists.
if self.con_by_id(con_id) is not None:
is_connected = True
else:
if self.con_by_unl(their_unl) is not None:
is_connected = True
# Skip if already connected.
if is_connected:
processed.append(dht_response)
continue
# Ask if the source sent it.
def success_builder():
def success(con):
# Indicate status.
self.debug_print("Received reverse connect"
" notice")
self.debug_print(nonce)
# Did you send this?
query = "REVERSE_QUERY:" + self.unl.value
self.dht_node.repeat_relay_message(node_id,
query)
# Record pending query state.
query = {
"unl": their_unl["value"],
"con": con,
"timestamp": time.time()
}
self.pending_reverse_queries.append(query)
return success
self.debug_print("Attempting to do reverse connect")
self.unl.connect(their_unl["value"],
{"success": success_builder()},
nonce=nonce)
processed.append(dht_response)
# Found reverse query (did you make this?)
elif re.match("^REVERSE_QUERY:[a-zA-Z0-9+/-=_\s]+$", msg)\
is not None:
# Process message.
self.debug_print("Received reverse query")
call, their_unl = msg.split(":")
their_unl = UNL(value=their_unl).deconstruct()
node_id = their_unl["node_id"]
# Do we know about this?
if their_unl["value"] not in \
self.unl.pending_reverse_con:
self.debug_print(their_unl)
self.debug_print(str(self.unl.pending_reverse_con))
self.debug_print("oops, we don't know about this"
" reverse query!")
processed.append(dht_response)
continue
else:
self.unl.pending_reverse_con.remove(
their_unl["value"])
# Send query.
query = "REVERSE_ORIGIN:" + self.unl.value
self.dht_node.repeat_relay_message(node_id, query)
processed.append(dht_response)
# Found reverse origin (yes I made this.)
elif re.match("^REVERSE_ORIGIN:[a-zA-Z0-9+/-=_\s]+$", msg) \
is not None:
self.debug_print("Received reverse origin")
for reverse_query in self.pending_reverse_queries:
pattern = "^REVERSE_ORIGIN:" + reverse_query["unl"]
pattern += "$"
if re.match(pattern, msg) is not None:
self.debug_print("Removing pending reverse"
" query: success!")
self.pending_reverse_queries.remove(
reverse_query)
processed.append(dht_response)
# Remove processed messages.
for msg in processed:
self.debug_print(msg)
self.dht_messages.remove(msg)
self.last_dht_msg = t
# Accept inbound connections.
if len(self.inbound) < self.max_inbound:
# Accept new passive inbound connections.
if self.passive is not None:
r, w, e = select.select([self.passive], [], [], 0)
for s in r:
if s == self.passive:
# Accept a new con from the listen queue.
client, address = self.passive.accept()
con = Sock(blocking=0)
con.set_sock(client)
node_ip, node_port = con.s.getpeername()
# Reject duplicate connections.
if self.validate_node(node_ip, node_port):
try:
node = {
"type": "accept",
"con": con,
"ip": con.s.getpeername()[0],
"port": con.s.getpeername()[1],
}
self.inbound.append(node)
self.debug_print(
"Accepted new passive connection: " +
str(node))
except:
log.debug("con.s.get")
else:
self.debug_print("Validation failure")
con.close()
# Accept new passive simultaneous connections.
if self.node_type == "simultaneous":
"""
This is basically the code that passive simultaneous
nodes periodically call to parse any responses from the
Rendezvous Server which should hopefully be new
requests to initiate hole punching from active
simultaneous nodes.
If a challenge comes in, the passive simultaneous
node accepts the challenge by giving details to the
server for the challenging node (active simultaneous)
to complete the simultaneous open.
"""
# try:
t = time.time()
if self.rendezvous.server_con is not None:
for reply in self.rendezvous.server_con:
# Reconnect.
if re.match("^RECONNECT$", reply) is not None:
if self.enable_advertise:
self.rendezvous.simultaneous_listen()
continue
# Find any challenges.
# CHALLENGE 192.168.0.1 50184 50185 50186 50187 TCP
parts = re.findall("^CHALLENGE ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+) (TCP|UDP)$", reply)
if not len(parts):
continue
(candidate_ip, candidate_predictions, candidate_proto)\
= parts[0]
self.debug_print("Found challenge")
self.debug_print(parts[0])
# Already connected.
if not self.validate_node(candidate_ip):
self.debug_print("validation failed")
continue
# Last meeting was too recent.
if t - self.last_passive_sim_open < sim_open_interval:
continue
# Accept challenge.
if self.sys_clock is not None:
origin_ntp = self.sys_clock.time()
else:
origin_ntp = get_ntp()
if origin_ntp is None:
continue
msg = "ACCEPT %s %s TCP %s" % (
candidate_ip,
self.rendezvous.predictions,
str(origin_ntp)
)
ret = self.rendezvous.server_con.send_line(msg)
if not ret:
continue
"""
                        Adding threading here doesn't work: Python's threads
                        are not truly parallel, and the act of starting a
                        thread ruins the timing needed for code
                        synchronisation - especially for code running on the
                        same host or in a LAN. We compensate by reducing the
                        NTP delay so the meetings occur faster and by limiting
                        meetings to one per period.
"""
# Walk to fight and return holes made.
self.last_passive_sim_open = t
con = self.rendezvous.attend_fight(
self.rendezvous.mappings, candidate_ip,
candidate_predictions, origin_ntp
)
if con is not None:
try:
node = {
"type": "simultaneous",
"con": con,
"ip": con.s.getpeername()[0],
"port": con.s.getpeername()[1],
}
self.inbound.append(node)
                            except Exception as e:
                                log.debug(str(e))
# Create new predictions ready to accept next client.
self.rendezvous.simultaneous_cons = []
if self.enable_advertise:
self.rendezvous.simultaneous_listen()
# QUIT - remove us from bootstrapping server.
if len(self.inbound) == self.max_inbound:
try:
# Remove advertise.
self.rendezvous.leave_fight()
except:
pass
# Bootstrap again if needed.
self.bootstrap()
        # Relist the node again if needed.
self.advertise()
"""
These functions here make the class behave like a list. The
list is a collection of connections (inbound) + (outbound.)
Every iteration also has the bonus of reaping dead connections,
making new ones (if needed), and accepting connections
"""
def __len__(self):
self.synchronize()
return len(self.inbound) + len(self.outbound)
def __iter__(self):
# Process connections.
self.synchronize()
# Copy all connections to single buffer.
cons = []
for node in self.inbound + self.outbound:
if node["con"].nonce is None:
if self.net_type == "direct":
continue
cons.append(node["con"])
# Return all cons.
return iter(cons)
if __name__ == "__main__":
"""
net = Net(debug=1)
net.disable_bootstrap()
net.disable_advertise()
net.disable_forwarding()
net.start()
print(net.unl.value)
print(net.unl.deconstruct(net.unl.value))
while 1:
time.sleep(0.5)
# Test simultaneous open.
p2p_net = Net(debug=1, nat_type="preserving", node_type="simultaneous")
p2p_net.start()
p2p_net.disable_advertise()
p2p_net.disable_bootstrap()
# p2p_net.add_node("192.187.97.131", 0, "simultaneous") # Behind NAT
def success_notify(con):
print("SUCCESS THREADING.")
#Test UNL
events = {
"success": success_notify
}
while 1:
time.sleep(0.5)
exit()
#P2P network example.
p2p_net = Net(debug=1)
p2p_net.start()
p2p_net.bootstrap()
p2p_net.advertise()
#Event loop.
while 1:
for con in p2p_net:
for reply in con:
print(reply)
                # Excludes con from broadcast since we got this message from them.
p2p_net.broadcast("Something.", con)
time.sleep(0.5)
#Direct network example.
dht_node = DHT()
direct_net = Net(dht_node=dht_node, debug=1)
direct_net.start()
#Connect to some UNL.
def success(con):
con.send_line("Thanks.")
#Note: this isn't a valid UNL.
#To get your UNL do: direct_net.unl.value.
direct_net.unl.connect("Some guys UNL...", {"success": success})
"""
|
debug_events_writer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json as json_lib
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
class DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase,
parameterized.TestCase):
def testMultiThreadedConstructorCallWorks(self):
def init_writer():
debug_events_writer.DebugEventsWriter(self.dump_root)
num_threads = 4
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=init_writer)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify that there is only one debug event file of each type.
metadata_paths = glob.glob(os.path.join(self.dump_root, "*.metadata"))
self.assertLen(metadata_paths, 1)
source_files_paths = glob.glob(
os.path.join(self.dump_root, "*.source_files"))
self.assertLen(source_files_paths, 1)
stack_frames_paths = glob.glob(
os.path.join(self.dump_root, "*.stack_frames"))
self.assertLen(stack_frames_paths, 1)
graphs_paths = glob.glob(os.path.join(self.dump_root, "*.graphs"))
self.assertLen(graphs_paths, 1)
self._readAndCheckMetadataFile()
def testWriteSourceFilesAndStackFrames(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_protos = 10
for i in range(num_protos):
source_file = debug_event_pb2.SourceFile()
source_file.file_path = "/home/tf2user/main.py"
source_file.host_name = "machine.cluster"
source_file.lines.append("print(%d)" % i)
writer.WriteSourceFile(source_file)
stack_frame = debug_event_pb2.StackFrameWithId()
stack_frame.id = "stack_%d" % i
stack_frame.file_line_col.file_index = i * 10
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.source_file
for item in reader.source_files_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].file_path, "/home/tf2user/main.py")
self.assertEqual(actuals[i].host_name, "machine.cluster")
self.assertEqual(actuals[i].lines, ["print(%d)" % i])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].id, "stack_%d" % i)
self.assertEqual(actuals[i].file_line_col.file_index, i * 10)
def testWriteGraphOpCreationAndDebuggedGraphs(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_op_creations = 10
for i in range(num_op_creations):
graph_op_creation = debug_event_pb2.GraphOpCreation()
graph_op_creation.op_type = "Conv2D"
graph_op_creation.op_name = "Conv2D_%d" % i
writer.WriteGraphOpCreation(graph_op_creation)
debugged_graph = debug_event_pb2.DebuggedGraph()
debugged_graph.graph_id = "deadbeaf"
debugged_graph.graph_name = "MyGraph1"
writer.WriteDebuggedGraph(debugged_graph)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugEventsReader(self.dump_root)
actuals = list(item.debug_event for item in reader.graphs_iterator())
self.assertLen(actuals, num_op_creations + 1)
for i in range(num_op_creations):
self.assertEqual(actuals[i].graph_op_creation.op_type, "Conv2D")
self.assertEqual(actuals[i].graph_op_creation.op_name, "Conv2D_%d" % i)
self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,
"deadbeaf")
def testConcurrentWritesToNonExecutionFilesWorks(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
source_file_state = {"counter": 0, "lock": threading.Lock()}
def writer_source_file():
source_file = debug_event_pb2.SourceFile()
with source_file_state["lock"]:
source_file.file_path = "/home/tf2user/file_%d.py" % source_file_state[
"counter"]
source_file_state["counter"] += 1
writer.WriteSourceFile(source_file)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
stack_frame_state = {"counter": 0, "lock": threading.Lock()}
def write_stack_frame():
stack_frame = debug_event_pb2.StackFrameWithId()
with stack_frame_state["lock"]:
stack_frame.id = "stack_frame_%d" % stack_frame_state["counter"]
stack_frame_state["counter"] += 1
writer.WriteStackFrameWithId(stack_frame)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
graph_op_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_op_creation():
graph_op_creation = debug_event_pb2.GraphOpCreation()
with graph_op_state["lock"]:
graph_op_creation.op_name = "Op%d" % graph_op_state["counter"]
graph_op_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
num_threads = 9
threads = []
for i in range(num_threads):
if i % 3 == 0:
target = writer_source_file
elif i % 3 == 1:
target = write_stack_frame
else:
target = write_graph_op_creation
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify the content of the .source_files file.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
actuals = list(item.debug_event.source_file for item in source_files_iter)
file_paths = sorted([actual.file_path for actual in actuals])
self.assertEqual(file_paths, [
"/home/tf2user/file_0.py", "/home/tf2user/file_1.py",
"/home/tf2user/file_2.py"
])
# Verify the content of the .stack_frames file.
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
stack_frame_ids = sorted([actual.id for actual in actuals])
self.assertEqual(stack_frame_ids,
["stack_frame_0", "stack_frame_1", "stack_frame_2"])
# Verify the content of the .graphs file.
actuals = list(item.debug_event.graph_op_creation
for item in reader.graphs_iterator())
graph_op_names = sorted([actual.op_name for actual in actuals])
self.assertEqual(graph_op_names, ["Op0", "Op1", "Op2"])
def testWriteAndReadMetadata(self):
t0 = time.time()
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
self.assertIsInstance(reader.starting_wall_time(), float)
self.assertGreaterEqual(reader.starting_wall_time(), t0)
self.assertEqual(reader.tensorflow_version(), versions.__version__)
def testWriteExecutionEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
# Before FlushExecutionFiles() is called, no data should have been written
# to the file.
reader.update()
self.assertFalse(reader.executions())
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
for i, execution in enumerate(executions):
self.assertEqual(
execution.op_type,
"OpType%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteExecutionEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, num_execution_events)
for i, execution in enumerate(executions):
self.assertEqual(execution.op_type, "OpType%d" % i)
def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(reader.graph_execution_traces_iterator())
# Before FlushExecutionFiles() is called, no data should have been written
# to the file.
self.assertEmpty(actuals)
writer.FlushExecutionFiles()
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterator())
self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)
for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):
self.assertEqual(
actuals[i].op_name,
"Op%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterator())
self.assertLen(actuals, num_execution_events)
for i in range(num_execution_events):
self.assertEqual(actuals[i].op_name, "Op%d" % i)
def testConcurrentWritesToExecutionFiles(self):
circular_buffer_size = 5
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
execution_state = {"counter": 0, "lock": threading.Lock()}
def write_execution():
execution = debug_event_pb2.Execution()
with execution_state["lock"]:
execution.op_type = "OpType%d" % execution_state["counter"]
execution_state["counter"] += 1
writer.WriteExecution(execution)
graph_execution_trace_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_execution_trace():
with graph_execution_trace_state["lock"]:
op_name = "Op%d" % graph_execution_trace_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
graph_execution_trace_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.WriteGraphExecutionTrace(trace)
threads = []
for i in range(circular_buffer_size * 4):
if i % 2 == 0:
target = write_execution
else:
target = write_graph_execution_trace
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
# Verify the content of the .execution file.
executions = reader.executions()
executed_op_types = [execution.op_type for execution in executions]
self.assertLen(executed_op_types, circular_buffer_size)
self.assertLen(executed_op_types, len(set(executed_op_types)))
# Verify the content of the .graph_execution_traces file.
op_names = [trace.op_name for trace in reader.graph_execution_traces()]
self.assertLen(op_names, circular_buffer_size)
self.assertLen(op_names, len(set(op_names)))
def testConcurrentSourceFileRandomReads(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
for i in range(100):
source_file = debug_event_pb2.SourceFile(
host_name="localhost", file_path="/tmp/file_%d.py" % i)
source_file.lines.append("# File %d" % i)
writer.WriteSourceFile(source_file)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
lines = [None] * 100
def read_job_1():
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
def read_job_2():
for i in range(99, 49, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(lines[i], ["# File %d" % i])
def testConcurrentExecutionUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % writer_state["counter"]
writer_state["counter"] += 1
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new Execution protos.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
exec_digests = reader.executions(digest=True)
if exec_digests:
exec_0 = reader.read_execution(exec_digests[0])
self.assertEqual(exec_0.op_type, "OpType0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentExecutionRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
for i in range(100):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
executions = [None] * 100
def read_job_1():
execution_digests = reader.executions(digest=True)
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
def read_job_2():
execution_digests = reader.executions(digest=True)
for i in range(99, 49, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(executions[i].op_type, "OpType%d" % i)
def testConcurrentGraphExecutionTraceUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
op_name = "Op%d" % writer_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer_state["counter"] += 1
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new GraphExecutionTraces.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
digests = reader.graph_execution_traces(digest=True)
if digests:
trace_0 = reader.read_graph_execution_trace(digests[0])
self.assertEqual(trace_0.op_name, "Op0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentGraphExecutionTraceRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(100):
op_name = "Op%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
traces = [None] * 100
def read_job_1():
digests = reader.graph_execution_traces(digest=True)
for i in range(49, -1, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
def read_job_2():
digests = reader.graph_execution_traces(digest=True)
for i in range(99, 49, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(traces[i].op_name, "Op%d" % i)
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingExecutions(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, circular_buffer_size=-1)
for i in range(5):
execution = debug_event_pb2.Execution(op_type="OpType%d" % i)
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions(begin=begin, end=end)
self.assertLen(executions, expected_end - expected_begin)
self.assertEqual(executions[0].op_type, "OpType%d" % expected_begin)
self.assertEqual(executions[-1].op_type, "OpType%d" % (expected_end - 1))
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingGraphExecutionTraces(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, circular_buffer_size=-1)
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(5):
op_name = "Op_%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
traces = reader.graph_execution_traces(begin=begin, end=end)
self.assertLen(traces, expected_end - expected_begin)
self.assertEqual(traces[0].op_name, "Op_%d" % expected_begin)
self.assertEqual(traces[-1].op_name, "Op_%d" % (expected_end - 1))
class DataObjectsTest(test_util.TensorFlowTestCase):
def jsonRoundTripCheck(self, obj):
self.assertEqual(
json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),
json_lib.dumps(obj, sort_keys=True))
def testExecutionDigestWithNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=None)
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], None)
def testExecutionDigestWithTwoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357, 2468])
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357, 2468))
def testExecutionNoGraphNoInputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=None,
output_tensor_ids=[2468],
debug_tensor_values=([1, 0],))
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertIsNone(json["graph_id"])
self.assertIsNone(json["input_tensor_ids"])
self.assertEqual(json["output_tensor_ids"], (2468,))
self.assertEqual(json["debug_tensor_values"], ([1, 0],))
def testExecutionWithGraphAndInputsButNoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=None,
debug_tensor_values=None)
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.FULL_HEALTH)
self.assertEqual(json["graph_id"], "abcd")
self.assertEqual(json["input_tensor_ids"], (13, 37))
self.assertIsNone(json["output_tensor_ids"])
self.assertIsNone(json["debug_tensor_values"])
def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234, 5678, "deadbeef", "FooOp", "Model_1/Foo_2",
[135], input_names=None, device_name=None)
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertIsNone(json["input_names"])
self.assertIsNone(json["device_name"])
def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234, 5678, "deadbeef", "FooOp", "Model_1/Foo_2",
[135], input_names=["Bar_1", "Qux_2"], device_name="/device:GPU:0")
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["input_names"], ("Bar_1", "Qux_2"))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceDigestToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
json = trace_digest.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[3, 1], device_name="/device:GPU:0")
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertEqual(json["debug_tensor_value"], (3, 1))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.NO_TENSOR,
debug_tensor_value=None, device_name=None)
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.NO_TENSOR)
self.assertIsNone(json["debug_tensor_value"])
self.assertIsNone(json["device_name"])
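# Condensed sketch of the write -> flush -> read round trip that the tests above
# exercise repeatedly; it is not invoked by the test suite. `dump_root` is any
# writable directory, and passing circular_buffer_size=0 to DebugEventsWriter
# would disable the circular-buffer behavior, as
# testWriteExecutionEventsWithoutCircularBufferBehavior shows.
def _example_write_read_round_trip(dump_root):
  writer = debug_events_writer.DebugEventsWriter(dump_root)
  execution = debug_event_pb2.Execution(op_type="ExampleOp")
  writer.WriteExecution(execution)
  # Execution events are buffered until FlushExecutionFiles() is called.
  writer.FlushExecutionFiles()
  with debug_events_reader.DebugDataReader(dump_root) as reader:
    reader.update()
    return [execution.op_type for execution in reader.executions()]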
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
multiprocessing_garbage_collection_prevention.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# {fact rule=multiprocessing-garbage-collection-prevention@v1.0 defects=1}
def garbage_collect_noncompliant(self):
from multiprocessing import Pipe
pipe = Pipe()
try:
# Trigger a refresh.
self.assertFalse(
client._MongoReplicaSetClient__monitor.isAlive())
client.disconnect()
self.assertSoon(
lambda: client._MongoReplicaSetClient__monitor.isAlive())
client.db.collection.find_one()
except Exception:
traceback.print_exc()
pipe.send(True)
def multiprocessing_noncompliant():
from multiprocessing import Process, Pipe
parent_connection, child_connection = Pipe()
# Noncompliant: fails to pass the parent process object to child processes.
process = Process(target=garbage_collect_noncompliant)
process.start()
# {/fact}
# {fact rule=multiprocessing-garbage-collection-prevention@v1.0 defects=0}
def garbage_collect_compliant(self, pipe):
try:
# Trigger a refresh.
self.assertFalse(
client._MongoReplicaSetClient__monitor.isAlive())
client.disconnect()
self.assertSoon(
lambda: client._MongoReplicaSetClient__monitor.isAlive())
client.db.collection.find_one()
except Exception:
traceback.print_exc()
pipe.send(True)
def multiprocessing_compliant():
from multiprocessing import Process, Pipe
parent_connection, child_connection = Pipe()
# Compliant: parent process object is passed to its child processes.
process = Process(target=garbage_collect_compliant,
args=(child_connection,))
process.start()
# {/fact}
|
tello_py3.py
|
import socket
import threading
import time
from stats_py3 import Stats
class Tello:
def __init__(self):
self.local_ip = ''
self.local_port = 8889
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for sending cmd
self.socket.bind((self.local_ip, self.local_port))
# thread for receiving cmd ack
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
self.tello_ip = '192.168.10.1'
self.tello_port = 8889
self.tello_address = (self.tello_ip, self.tello_port)
self.log = []
self.MAX_TIME_OUT = 15.0
def send_command(self, command):
"""
Send a command to the ip address. Will be blocked until
the last command receives an 'OK'.
If the command fails (either b/c time out or error),
will try to resend the command
:param command: (str) the command to send
:param ip: (str) the ip of Tello
:return: The latest command response
"""
self.log.append(Stats(command, len(self.log)))
self.socket.sendto(command.encode('utf-8'), self.tello_address)
print('sending command: %s to %s' % (command, self.tello_ip))
start = time.time()
while not self.log[-1].got_response():
now = time.time()
diff = now - start
if diff > self.MAX_TIME_OUT:
print('Max timeout exceeded... command %s' % command)
# TODO: is a timeout considered a failure, or should the next command still be executed?
# Currently, the next command still gets executed.
return
print('Done!!! sent command: %s to %s' % (command, self.tello_ip))
def _receive_thread(self):
"""Listen to responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response, ip = self.socket.recvfrom(1024)
print('from %s: %s' % (ip, self.response))
self.log[-1].add_response(self.response)
except socket.error as exc:
print("Caught exception socket.error : %s" % exc)
def on_close(self):
pass
# for ip in self.tello_ip_list:
# self.socket.sendto('land'.encode('utf-8'), (ip, 8889))
# self.socket.close()
def get_log(self):
return self.log
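if __name__ == '__main__':
    # Minimal usage sketch: assumes a Tello drone reachable at 192.168.10.1 as
    # configured above. 'command' switches the drone into SDK mode and
    # 'battery?' queries the battery level; both are standard Tello SDK strings.
    tello = Tello()
    tello.send_command('command')
    tello.send_command('battery?')
    for entry in tello.get_log():
        print(entry.got_response())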
|
xtpGateway.py
|
# encoding: UTF-8
'''
Gateway integration for vn.xtp (the XTP securities trading API).
'''
import os
import json,copy
import traceback
from vnpy.api.xtp import *
from vnpy.trader.vtGateway import *
from vnpy.trader.vtFunction import getJsonPath, getTempPath
from vnpy.trader.vtUtility import BarGenerator
from pytdx.hq import TdxHq_API
from multiprocessing.dummy import Pool
from threading import Thread
from time import sleep
from datetime import datetime
# The following are mapping dictionaries between VT types and XTP types
# XTP_PRICE_TYPE is the price type
# Enum values:
# 1 XTP_PRICE_LIMIT               Limit order - SSE / SZSE / SSE options (all business types other than ordinary stock business use this type)
# 2 XTP_PRICE_BEST_OR_CANCEL      Immediate-or-cancel market order - SZSE / SSE options
# 3 XTP_PRICE_BEST5_OR_LIMIT      Best-five levels immediate, remainder converted to a limit order - SSE market order
# 4 XTP_PRICE_BEST5_OR_CANCEL     Best-five levels immediate, remainder cancelled - SSE / SZSE market order
# 5 XTP_PRICE_ALL_OR_CANCEL       Fill-or-kill market order - SZSE / SSE options
# 6 XTP_PRICE_FORWARD_BEST        Own-side best price market order - SZSE
# 7 XTP_PRICE_REVERSE_BEST_LIMIT  Counterparty best, remainder converted to a limit order - SZSE / SSE options
# 8 XTP_PRICE_LIMIT_OR_CANCEL     Option limit order FOK
# 9 XTP_PRICE_TYPE_UNKNOWN        Unknown or invalid price type
# Price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = 1
priceTypeMap[PRICETYPE_MARKETPRICE] = 4
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Side (direction/offset) type mapping
sideMap = {}
#sideMap[(DIRECTION_LONG, OFFSET_NONE)] = 1
sideMap[(DIRECTION_LONG, OFFSET_OPEN)] = 1
#sideMap[(DIRECTION_SHORT, OFFSET_NONE)] = 2
sideMap[(DIRECTION_SHORT, OFFSET_CLOSE)] = 2
#sideMap[(DIRECTION_LONG, OFFSET_OPEN)] = 3
sideMap[(DIRECTION_SHORT, OFFSET_OPEN)] = 4
sideMap[(DIRECTION_LONG, OFFSET_CLOSE)] = 5
#sideMap[(DIRECTION_SHORT, OFFSET_CLOSE)] = 6
sideMapReverse = {v: k for k, v in sideMap.items()}
# Exchange type mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SSE] = 1
exchangeMap[EXCHANGE_SZSE] = 2
exchangeMap[EXCHANGE_UNKNOWN] = 3
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# Market type mapping
marketMap = {}
marketMap[EXCHANGE_UNKNOWN] = 0
marketMap[EXCHANGE_SZSE] = 1
marketMap[EXCHANGE_SSE] = 2
marketMapReverse = {v:k for k,v in marketMap.items()}
# Position direction mapping
#posiDirectionMap = {}
#posiDirectionMap[DIRECTION_NET] = defineDict["THOST_FTDC_PD_Net"]
#posiDirectionMap[DIRECTION_LONG] = defineDict["THOST_FTDC_PD_Long"]
#posiDirectionMap[DIRECTION_SHORT] = defineDict["THOST_FTDC_PD_Short"]
#posiDirectionMapReverse = {v:k for k,v in posiDirectionMap.items()}
# Product class mapping
productClassMapReverse = {}
productClassMapReverse[0] = PRODUCT_EQUITY
productClassMapReverse[1] = PRODUCT_INDEX
productClassMapReverse[2] = PRODUCT_EQUITY
productClassMapReverse[3] = PRODUCT_EQUITY
productClassMapReverse[4] = PRODUCT_UNKNOWN
# Order status mapping
statusMapReverse = {}
statusMapReverse[0] = STATUS_UNKNOWN
statusMapReverse[1] = STATUS_ALLTRADED
statusMapReverse[2] = STATUS_PARTTRADED
statusMapReverse[3] = STATUS_CANCELLED
statusMapReverse[4] = STATUS_NOTTRADED
statusMapReverse[5] = STATUS_CANCELLED
statusMapReverse[6] = STATUS_REJECTED
statusMapReverse[7] = STATUS_UNKNOWN
# Business type mapping
#businessMap = {}
#businessMap[BUSINESS_CASH] = 0
#businessMap[BUSINESS_IPO] = 1
#businessMap[BUSINESS_REPO] = 2
#businessMap[BUSINESS_ETF] = 3
#businessMap[BUSINESS_MARGIN] = 4
#businessMap[BUSINESS_DESIGNATION] = 5
#businessMap[BUSINESS_ALLOTMENT] = 6
#businessMap[BUSINESS_STRUCTURED_FUND_PURCHASE_REDEMPTION] = 7
#businessMap[BUSINESS_STRUCTURED_FUND_SPLIT_MERGE] = 8
#businessMap[BUSINESS_MONEY_FUND] = 9
#businessMap[BUSINESS_UNKNOWN] = 10
#businessMapReverse = {v:k for k,v in businessMap.items()}
# TDX market type mapping # 0 - Shenzhen, 1 - Shanghai
tdxMarketMap = {}
tdxMarketMap[EXCHANGE_SZSE] = 0
tdxMarketMap[EXCHANGE_SSE] = 1
tdxMarketMapReverse = {v:k for k,v in tdxMarketMap.items()}
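# Worked example of the mappings above (values taken from the dictionaries as defined):
# a VT long/open order maps to XTP side code 1 via sideMap[(DIRECTION_LONG, OFFSET_OPEN)],
# and an order callback carrying side=5 maps back to (DIRECTION_LONG, OFFSET_CLOSE)
# via sideMapReverse[5]. Likewise exchangeMap turns EXCHANGE_SSE into 1 for
# subscription requests, while marketMapReverse turns the market field 2 of a
# trade/position callback back into EXCHANGE_SSE.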
########################################################################
class XtpGateway(VtGateway):
"""XTP接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='XTP'):
"""Constructor"""
super(XtpGateway, self).__init__(eventEngine, gatewayName)
self.mdApi = XtpMdApi(self) # market data API
self.tdApi = XtpTdApi(self) # trading API
self.tdxApi = None # TDX (pytdx) stock market data API
self.tdx_pool_count = 3 # TDX market data connection pool size
self.mdConnected = False # market data API connection status; True once login completes
self.tdConnected = False # trading API connection status
self.tdxConnected = False # TDX stock market data API connection status
self.qryEnabled = False # whether to start the polling query loop
self.subscribedSymbols = set() # subscribed stock symbols
self.klines = {} # per-symbol 1-minute BarGenerator instances used by subscribeBar/onTick
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
# Debug options: print ticks, print raw data
self.debug_display_tick = False
self.debug_raw_data = False
#----------------------------------------------------------------------
def connect(self):
"""连接"""
try:
f = open(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
# Parse the JSON config file
setting = json.load(f)
f.close()
try:
userID = str(setting['userID'])
password = str(setting['password'])
clientID = int(setting['clientID'])
softwareKey = str(setting['softwareKey'])
tdAddress = str(setting['tdAddress'])
tdPort = int(setting['tdPort'])
mdAddress = str(setting['mdAddress'])
mdPort = int(setting['mdPort'])
self.writeLog(u'使用行情:{}:{}'.format(mdAddress, mdPort))
self.writeLog(u'使用交易:{}:{}'.format(tdAddress, tdPort))
self.debug_display_tick = setting.get('debug_display_tick',False)
self.debug_raw_data = setting.get('debug_raw_data',False)
# Read the tdx configuration
tdx_conf = setting.get('tdx', None)
if tdx_conf is not None and isinstance(tdx_conf, dict):
if self.tdxApi is None:
self.writeLog(u'通达信接口未实例化,创建实例')
self.tdxApi = TdxMdApi(self) # 通达信行情API
ip = tdx_conf.get('ip', None)
if ip is not None:
self.writeLog(u'使用配置文件的tdx服务器:{}'.format(ip))
self.tdxApi.best_ip = copy.copy(ip)
# Read the default TDX connection pool size
self.tdx_pool_count = tdx_conf.get('pool_count', self.tdx_pool_count)
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
# Create the market data and trading API connections
self.mdApi.connect(userID, password, clientID, mdAddress, mdPort)
self.tdApi.connect(userID, password, clientID, softwareKey, tdAddress, tdPort)
# Initialize and start the polling queries
self.initQuery()
for req in list(self.subscribedSymbols):
if self.tdxApi is not None:
self.writeLog(u'启用tdx,连接通达信行情服务器')
self.tdxApi.connect(self.tdx_pool_count)
self.tdxApi.subscribe(req)
else:
self.mdApi.subscribe(req)
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
if len(subscribeReq.symbol)==0:
return
if self.tdxApi is not None:
self.writeLog(u'启用tdx,连接通达信行情服务器')
self.tdxApi.connect(self.tdx_pool_count)
self.tdxApi.subscribe(subscribeReq)
else:
self.mdApi.subscribe(subscribeReq)
self.subscribedSymbols.add(subscribeReq)
if subscribeReq.is_bar:
self.subscribeBar(subscribeReq)
def subscribeBar(self,subscribeReq):
"""订阅1分钟行情"""
symbol = subscribeReq.symbol
if symbol in self.klines:
return
# Create a 1-minute bar generator
self.writeLog(u'创建:{}的一分钟行情'.format(symbol))
bg = BarGenerator(onBar=self.onBar)
self.klines.update({symbol:bg})
#----------------------------------------------------------------------
def unSubscribe(self, subscribeReq):
""""""
self.mdApi.unSubscribe(subscribeReq)
# ----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.tdApi.sendOrder(orderReq)
# ----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.tdApi.sendCancel(cancelOrderReq)
# ----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
if self.tdConnected:
self.tdApi.qryAccount()
# ----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
if self.tdConnected:
self.tdApi.qryPosition()
# ----------------------------------------------------------------------
def close(self):
"""关闭"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
if self.tdxApi is not None:
self.writeLog(u'断开通达信行情API')
tmp1 = self.tdxApi
self.tdxApi.connection_status = False
self.tdxApi = None
tmp1.close()
self.tdxConnected = False
# ----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# List of query functions to poll in a loop
self.qryFunctionList = [self.qryAccount, self.qryPosition]
self.qryCount = 0 # countdown to the query trigger
self.qryTrigger = 2 # query trigger threshold
self.qryNextFunction = 0 # index of the query function run last time
self.startQuery()
# ----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
# 清空倒计时
self.qryCount = 0
# 执行查询函数
function = self.qryFunctionList[self.qryNextFunction]
function()
# Compute the index of the next query function; reset to 0 if it exceeds the list length
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
# ----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
# ----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
def checkStatus(self):
if self.tdxApi is not None:
self.tdxApi.checkStatus()
self.mdApi.checkStatus()
return True
########################################################################
class XtpMdApi(QuoteApi):
"""XTP行情API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(XtpMdApi, self).__init__()
self.gateway = gateway # gateway对象
self.gatewayName = gateway.gatewayName # gateway对象名称
self.reqID = EMPTY_INT # 操作请求编号
self.connectionStatus = False # 连接状态
self.loginStatus = False # 登录状态
self.subscribedSymbols = set() # 已订阅合约代码
self.userID = EMPTY_STRING # 账号
self.password = EMPTY_STRING # 密码
self.address = EMPTY_STRING # 服务器地址
self.port = EMPTY_INT # 服务器端口
self.last_tick_dt = None
self.had_disconnected = False
#----------------------------------------------------------------------
def onDisconnected(self, reason):
"""连接断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.mdConnected = False
self.had_disconnected = True
content = (u'行情服务器连接断开,原因:%s' %reason)
self.writeLog(content)
def reconnect(self):
# 重新连接
n = self.login(self.address, self.port, self.userID, self.password, 1)
if not n:
self.connectionStatus = True
self.loginStatus = True
self.gateway.mdConnected = True
self.writeLog(u'行情服务器登录成功')
self.reSubscribe()
# Subscribe to the full market data of SSE / SZSE
self.writeLog(u'订阅上交所/深交所的全行情')
self.queryAllTickers(1) # 上交所
self.queryAllTickers(2) # 深交所
else:
self.writeLog(u'行情服务器登录失败,原因:%s' %n)
def checkStatus(self):
# self.writeLog(u'检查tdx接口状态')
if len(self.subscribedSymbols) == 0:
return
# If the connection has not been started yet, start it
if self.last_tick_dt is not None:
over_time = (datetime.now() - self.last_tick_dt).total_seconds() > 60
else:
over_time = False
if self.had_disconnected and (not self.connectionStatus or over_time):
self.writeLog(u'XTP没有行情连接,就启动连接')
self.reconnect()
#----------------------------------------------------------------------
def onError(self, error):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['error_id']
err.errorMsg = error['error_msg'] # .decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onSubMarketData(self, data, error, last):
"""订阅行情回报"""
if self.gateway.debug_raw_data:
print(u'onSubMarketData')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onUnSubMarketData(self, data, error, last):
"""退订行情回报"""
if self.gateway.debug_raw_data:
print(u'onUnSubMarketData')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onDepthMarketData(self, *args):
"""行情推送"""
if self.gateway.debug_raw_data:
print(u'onDepthMarketData')
print(self.gateway.printDict(args[0]))
if len(args)<1:
return
try:
tick = VtTickData()
tick.gatewayName = self.gatewayName
data = args[0]
tick.symbol = data['ticker']
tick.exchange = exchangeMapReverse.get(data['exchange_id'], EXCHANGE_UNKNOWN)
#tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.vtSymbol = tick.symbol
tick.lastPrice = data['last_price']
tick.volume = data['qty']
#tick.openInterest = data['open_interest'] # 股票tick没有持仓总量
timestamp = str(data['data_time'])
tick.date = timestamp[:4] + '-' + timestamp[4:6] + '-' + timestamp[6:8]
tick.time = '%s:%s:%s.%s' %(timestamp[8:10], timestamp[10:12], timestamp[12:14], timestamp[14])
tick.datetime = datetime.strptime('{} {}'.format(tick.date,tick.time),'%Y-%m-%d %H:%M:%S.%f')
tick.tradingDay = tick.date
tick.openPrice = data['open_price']
tick.highPrice = data['high_price']
tick.lowPrice = data['low_price']
tick.preClosePrice = data['pre_close_price']
tick.upperLimit = data['upper_limit_price']
tick.lowerLimit = data['lower_limit_price']
tick.bidPrice1, tick.bidPrice2, tick.bidPrice3, tick.bidPrice4, tick.bidPrice5 = data['bid'][0:5]
tick.askPrice1, tick.askPrice2, tick.askPrice3, tick.askPrice4, tick.askPrice5 = data['ask'][0:5]
tick.bidVolume1, tick.bidVolume2, tick.bidVolume3, tick.bidVolume4, tick.bidVolume5 = data['bid_qty'][0:5]
tick.askVolume1, tick.askVolume2, tick.askVolume3, tick.askVolume4, tick.askVolume5 = data['ask_qty'][0:5]
self.last_tick_dt=tick.datetime
# Discard ticks outside trading hours
if tick.datetime.hour not in [9,10,11,13,14]:
return
if tick.datetime.hour == 9 and tick.datetime.minute < 15:
return
if self.gateway.debug_display_tick:
self.writeLog('xtp:{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(tick.gatewayName, tick.symbol,
tick.exchange, tick.vtSymbol,
tick.datetime, tick.tradingDay,
tick.openPrice, tick.highPrice,
tick.lowPrice, tick.preClosePrice,
tick.bidPrice1,
tick.bidVolume1, tick.askPrice1,
tick.askVolume1))
self.gateway.onTick(tick)
# 推送Bar
if tick.vtSymbol in self.gateway.klines:
kline = self.gateway.klines.get(tick.vtSymbol)
kline.updateTick(copy.copy(tick))
except Exception as ex:
self.gateway.writeError(u'onDepthMarketData异常:{},{}'.format(str(ex),traceback.format_exc()))
#print(u'onDepthMarketData finished')
#----------------------------------------------------------------------
def onQueryAllTickers(self, data, error, last):
"""合约信息推送"""
if self.gateway.debug_raw_data:
print(u'onQueryAllTickers')
print(self.gateway.printDict(data))
if error and error.get('error_id',None) is not None:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error.get('error_id')
err.errorMsg = error.get('error_msg',EMPTY_STRING) #.decode('gbk')
self.gateway.onError(err)
return
try:
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data.get('ticker','ticker')
contract.exchange = exchangeMapReverse.get(data.get('exchange_id',0), EXCHANGE_UNKNOWN)
contract.vtSymbol = contract.symbol # '.'.join([contract.symbol, contract.exchange])
contract.name = data.get('ticker_name','ticker_name') #.decode('UTF-8')
contract.size = 1
contract.priceTick = data.get('price_tick',0.01)
contract.productClass = productClassMapReverse.get(data.get('ticker_type',None), PRODUCT_UNKNOWN)
self.gateway.onContract(contract)
except Exception as ex:
self.gateway.writeError(u'onQueryAllTickers Exception:{},{}'.format(str(ex),traceback.format_exc()))
#----------------------------------------------------------------------
def onSubOrderBook(self, data, error, last):
""""""
if self.gateway.debug_raw_data:
print(u'onSubOrderBook')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onUnSubOrderBook(self, data, error, last):
""""""
if self.gateway.debug_raw_data:
print(u'onUnSubOrderBook')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onOrderBook(self, data):
""""""
if self.gateway.debug_raw_data:
print(u'onOrderBook')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onSubTickByTick(self, data, error, last):
""""""
if self.gateway.debug_raw_data:
print(u'onSubTickByTick')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onUnSubTickByTick(self, data, error, last):
""""""
if self.gateway.debug_raw_data:
print(u'onUnSubTickByTick')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onTickByTick(self, data):
""""""
if self.gateway.debug_raw_data:
print(u'onTickByTick')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onSubscribeAllMarketData(self, error):
""""""
if self.gateway.debug_raw_data:
print(u'onSubscribeAllMarketData')
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def onUnSubscribeAllMarketData(self, error):
""""""
if self.gateway.debug_raw_data:
print(u'onUnSubscribeAllMarketData')
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def onSubscribeAllOrderBook(self, error):
""""""
if self.gateway.debug_raw_data:
print(u'onSubscribeAllOrderBook')
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def onUnSubscribeAllOrderBook(self, error):
""""""
if self.gateway.debug_raw_data:
print(u'onUnSubscribeAllOrderBook')
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def onSubscribeAllTickByTick(self, error):
""""""
if self.gateway.debug_raw_data:
print(u'onSubscribeAllTickByTick')
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def onUnSubscribeAllTickByTick(self, error):
""""""
if self.gateway.debug_raw_data:
print(u'onUnSubscribeAllTickByTick')
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def onQueryTickersPriceInfo(self, data, error, last):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryTickersPriceInfo')
print(self.gateway.printDict(data))
print(self.gateway.printDict(error))
#----------------------------------------------------------------------
def connect(self, userID, password, clientID, address, port):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.address = address # 服务器地址
self.port = port # 端口号
try:
# If the server connection has not been established yet, connect
if not self.connectionStatus:
path = os.getcwd() + '/temp/' + self.gatewayName + '/'
if not os.path.exists(path):
os.makedirs(path)
self.createQuoteApi(clientID, path)
# Protocol: 1 = TCP, 2 = UDP
n = self.login(address, port, userID, password, 1)
if not n:
self.connectionStatus = True
self.loginStatus = True
self.gateway.mdConnected = True
self.writeLog(u'行情服务器登录成功')
self.writeLog(u'查询合约信息')
self.queryAllTickers(1) # 上交所
self.queryAllTickers(2) # 深交所
self.reSubscribe()
else:
self.writeLog(u'行情服务器登录失败,原因:%s' %n)
except Exception as ex:
self.gateway.writeError(u'MdApi connect Exception:{} {}'.format(str(ex),traceback.format_exc()))
def reSubscribe(self):
"""重新订阅行情"""
for req in list(self.subscribedSymbols):
if self.loginStatus:
self.writeLog(u'重新订阅{}行情'.format(req.symbol))
self.subscribeMarketData([{'ticker': str(req.symbol)}], 1,
exchangeMap.get(req.exchange, 3))
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
# The design here: if subscribe() is called before login has completed,
# the request is saved first and subscribed automatically once login finishes
if '.' in subscribeReq.symbol:
subscribeReq.symbol = subscribeReq.symbol.split('.')[0]
if self.loginStatus:
self.subscribeMarketData([{'ticker':str(subscribeReq.symbol)}],1,
exchangeMap.get(subscribeReq.exchange,3))
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
def unSubscribe(self, subscribeReq):
""""""
if self.loginStatus:
self.unSubscribeMarketData(str(subscribeReq.symbol),
exchangeMap[subscribeReq.exchange])
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.writeLog(u'xtp gateway close')
self.exit()
#----------------------------------------------------------------------
def writeLog(self, content):
"""记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
########################################################################
class XtpTdApi(TraderApi):
"""XTP交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(XtpTdApi, self).__init__()
self.gateway = gateway # gateway对象
self.gatewayName = gateway.gatewayName # gateway对象名称
self.reqID = EMPTY_INT # 操作请求编号
self.connectionStatus = False # 连接状态
self.loginStatus = False # 登录状态
self.userID = EMPTY_STRING # 账号
self.password = EMPTY_STRING # 密码
self.address = EMPTY_STRING # 服务器地址
self.port = EMPTY_INT # 服务器端口
self.clientID = EMPTY_INT # 客户编号
self.sessionID = EMPTY_INT # 会话编号
self.orderDict = {} # 委托缓存字典
# ----------------------------------------------------------------------
def onDisconnected(self, session, reason):
"""连接断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
content = (u'交易服务器连接断开,原因:%s' %reason)
self.writeLog(content)
try:
# Initiate reconnection (protocol: 1 = TCP, 2 = UDP)
n = self.login(self.address, self.port, self.userID, self.password, 1)
if n:
self.sessionID = n
self.connectionStatus = True
self.loginStatus = True
self.gateway.tdConnected = True
self.writeLog(u'交易服务器登录成功,会话编号:%s' %n)
else:
self.writeLog(u'交易服务器登录失败')
except Exception as ex:
self.gateway.writeError(u'TdApi onDisconnected Exception:{} {}'.format(str(ex),traceback.format_exc()))
# ----------------------------------------------------------------------
def onError(self, error):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['error_id']
err.errorMsg = error['error_msg'] #.decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onOrderEvent(self, data, error, session):
"""委托数据回报"""
print('onOrderEvent:{}'.format(data))
try:
orderID = str(data['order_xtp_id'])
if orderID not in self.orderDict:
# 创建报单数据对象
order = VtOrderData()
order.gatewayName = self.gatewayName
# 保存代码和报单号
order.symbol = data['ticker']
order.exchange = marketMapReverse.get(data['market'], EXCHANGE_UNKNOWN)
order.vtSymbol = order.symbol # '.'.join([order.symbol, order.exchange])
order.orderID = orderID
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.sessionID = self.sessionID
order.frontID = self.getClientIDByXTPID(data['order_xtp_id'])
# 开平和方向
order.direction, order.offset = sideMapReverse.get(data['side'],
(DIRECTION_UNKNOWN, OFFSET_UNKNOWN))
# 不变的字段
order.price = data['price']
order.totalVolume = data['quantity']
order.priceType = priceTypeMapReverse.get(data['price_type'], '')
self.orderDict[orderID] = order
else:
order = self.orderDict[orderID]
# 变化字段
order.status = statusMapReverse.get(data['order_status'], STATUS_UNKNOWN)
order.tradedVolume = data['qty_traded']
if data['insert_time']:
timestamp = str(data['insert_time'])
order.orderTime = '%s:%s:%s' %(timestamp[8:10], timestamp[10:12], timestamp[12:14])
if data['cancel_time']:
timestamp = str(data['cancel_time'])
order.cancelTime = '%s:%s:%s' %(timestamp[8:10], timestamp[10:12], timestamp[12:14])
# 推送
self.gateway.onOrder(order)
# 错误信息
if error['error_id']:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['error_id']
err.errorMsg = u'委托号' + str(order.orderID) + ':' + error['error_msg'] # .decode('gbk')
err.errorTime = order.orderTime
self.gateway.onError(err)
except Exception as ex:
self.gateway.writeError(u'onOrderEvent Exception:{} {}'.format(str(ex),traceback.format_exc()))
#----------------------------------------------------------------------
def onTradeEvent(self, data, session):
"""成交推送"""
#print('onTradeEvent:{}'.format(data))
try:
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
# 保存代码和报单号
trade.symbol = data['ticker']
trade.exchange = marketMapReverse.get(data['market'], EXCHANGE_UNKNOWN)
trade.vtSymbol = trade.symbol # '.'.join([trade.symbol, trade.exchange])
trade.tradeID = str(data['exec_id'])
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
orderID = str(data['order_xtp_id'])
trade.orderID = orderID
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
# 开平和方向
trade.direction, trade.offset = sideMapReverse.get(data['side'],
(DIRECTION_UNKNOWN, OFFSET_UNKNOWN))
# 价格、报单量等数值
trade.price = data['price']
trade.volume = data['quantity']
if data['trade_time']:
timestamp = str(data['trade_time'])
trade.tradeTime = '%s:%s:%s' %(timestamp[8:10], timestamp[10:12], timestamp[12:14])
# 推送
self.gateway.onTrade(trade)
# 更新委托数据
order = self.orderDict.get(orderID, None)
if (not order or
order.status is STATUS_ALLTRADED or
order.status is STATUS_CANCELLED):
return
order.tradedVolume += trade.volume
if order.status is STATUS_NOTTRADED:
order.status = STATUS_PARTTRADED
self.gateway.onOrder(order)
except Exception as ex:
self.gateway.writeError(u'onTradeEvent Exception:{} {}'.format(str(ex),traceback.format_exc()))
#----------------------------------------------------------------------
def onCancelOrderError(self, data, error, session):
"""撤单错误回报"""
print('onCancelOrderError')
try:
if error['error_id']:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['error_id']
err.errorMsg = u'委托号' + str(data['order_xtp_id']) + ':' + error['error_msg'] # .decode('gbk')
self.gateway.onError(err)
except Exception as ex:
self.gateway.writeError(u'onCancelOrderError Exception:{} {}'.format(str(ex), traceback.format_exc()))
#----------------------------------------------------------------------
def onQueryOrder(self, data, error, reqid, last, session):
"""委托查询回报"""
print('onQueryOrder')
#----------------------------------------------------------------------
def onQueryTrade(self, data, error, reqid, last, session):
"""成交查询回报"""
print('onQueryTrade')
#----------------------------------------------------------------------
def onQueryPosition(self, data, error, reqid, last, session):
"""查询持仓回报"""
#print('onQueryPosition:{}'.format(data))
try:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
            # Save the symbol
pos.symbol = data['ticker']
pos.exchange = marketMapReverse.get(data['market'], EXCHANGE_UNKNOWN)
pos.vtSymbol = pos.symbol # '.'.join([pos.symbol, pos.exchange])
pos.name = data['ticker_name'] #.decode('UTF-8')
            # Direction, position and frozen volume
pos.direction = DIRECTION_LONG
pos.position = data.get('total_qty',0)
pos.frozen = pos.position - data.get('sellable_qty',0)
pos.ydPosition = data.get('yesterday_position',0)
pos.price = data.get('avg_price',0.0)
            # VT position name
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
            # Push to the gateway
self.gateway.onPosition(pos)
except Exception as ex:
self.gateway.writeError('onQueryPosition Exception:{},{}'.format(str(ex),traceback.format_exc()))
#----------------------------------------------------------------------
def onQueryAsset(self, data, error, reqid, last, session):
"""账户查询回报"""
if self.gateway.debug_raw_data:
print('onQueryAsset')
print(self.gateway.printDict(data))
try:
account = VtAccountData()
account.gatewayName = self.gatewayName
            # Account code
account.accountID = self.userID
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
            # Numeric fields
account.balance = float(data['total_asset'])
account.available = float(data['buying_power'])
account.commission = float(data['fund_buy_fee']) + float(data['fund_sell_fee'])
            # Push to the gateway
self.gateway.onAccount(account)
except Exception as ex:
self.gateway.writeError('onQueryAsset Exception:{},{}'.format(str(ex), traceback.format_exc()))
#----------------------------------------------------------------------
def onQueryStructuredFund(self, data, error, reqid, last, session):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryStructuredFund')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onQueryFundTransfer(self, data, error, reqid, last, session):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryFundTransfer')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onFundTransfer(self, data, error, session):
""""""
if self.gateway.debug_raw_data:
print(u'onFundTransfer')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onQueryETF(self, data, error, reqid, last, session):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryETF')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onQueryETFBasket(self, data, error, reqid, last, session):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryETFBasket')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onQueryIPOInfoList(self, data, error, reqid, last, session):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryIPOInfoList')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def onQueryIPOQuotaInfo(self, data, error, reqid, last, session):
""""""
if self.gateway.debug_raw_data:
print(u'onQueryIPOQuotaInfo')
print(self.gateway.printDict(data))
#----------------------------------------------------------------------
def connect(self, userID, password, clientID, softwareKey, address, port):
"""初始化连接"""
        self.userID = userID        # account
        self.password = password    # password
        self.address = address      # server address
        self.port = port            # port number
self.clientID = clientID
try:
            # Connect to the server if no connection has been established yet
if not self.connectionStatus:
path = os.getcwd() + '/temp/' + self.gatewayName + '/'
if not os.path.exists(path):
os.makedirs(path)
self.createTraderApi(clientID, path)
                # Set the software key, used for authentication
self.setSoftwareKey(softwareKey)
                # Set the resend mode for order and trade callbacks
self.subscribePublicTopic(0)
            # Log in
n = self.login(address, port, userID, password, 1)
if n:
self.sessionID = n
self.connectionStatus = True
self.loginStatus = True
self.gateway.tdConnected = True
self.writeLog(u'交易服务器登录成功,会话编号:%s' %n)
else:
self.writeLog(u'交易服务器登录失败')
except Exception as ex:
self.gateway.writeError(u'TdApi connect Exception:{} {}'.format(str(ex), traceback.format_exc()))
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
if self.sessionID:
self.reqID += 1
self.queryAsset(self.sessionID, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
if self.sessionID:
self.reqID += 1
self.queryPosition('', self.sessionID, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
if '.' in orderReq.symbol:
orderReq.symbol = orderReq.symbol.split('.')[0]
req = {}
req['ticker'] = orderReq.symbol
req['price'] = orderReq.price
        # For stock orders the quantity must be cast to int
req['quantity'] = int(orderReq.volume)
req['price_type'] = priceTypeMap.get(orderReq.priceType, 0)
req['market'] = marketMap.get(orderReq.exchange, 0)
        req['business_type'] = 0  # only plain buy/sell business is supported for now
        # Derivatives trading is not supported yet, so the combined side map below is not used
#req['side'] = sideMap.get((orderReq.direction, OFFSET_NONE), 0)
if orderReq.direction == DIRECTION_LONG:
req['side'] = 1
req['position_effect'] = 1
else:
req['side'] = 2
req['position_effect'] = 2
if self.gateway.debug_raw_data:
print(u'xtp send order:{}'.format(self.gateway.printDict(req)))
        # Submit the order
orderID = str(self.insertOrder(req, self.sessionID))
vtOrderID = '.'.join([self.gatewayName, orderID])
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = orderReq.symbol
order.exchange = orderReq.exchange
order.vtSymbol = orderReq.symbol # + '.' +order.exchange
order.orderID = orderID
order.vtOrderID = vtOrderID
order.direction = orderReq.direction
order.offset = OFFSET_OPEN if order.direction == DIRECTION_LONG else OFFSET_CLOSE
order.price = orderReq.price
order.totalVolume = int(orderReq.volume)
order.tradedVolume = 0
order.status = STATUS_UNKNOWN
order.orderTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.gateway.onOrder(order)
        # Return the order ID (as a string) so that algorithms can manage it dynamically
return vtOrderID
#----------------------------------------------------------------------
def sendCancel(self, cancelOrderReq):
"""撤单,因为cancelOrder的命名已经被原生接口使用了,所以改为sendCancel"""
self.cancelOrder(int(cancelOrderReq.orderID), self.sessionID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
#----------------------------------------------------------------------
def writeLog(self, content):
"""记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
class TdxMdApi():
"""
    TDX (TongDaXin) stock market data API implementation.
    Uses a thread pool to poll quotes only for subscribed contracts and update their data.
"""
def __init__(self, gateway):
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway name
        self.req_interval = 0.5                 # request interval: 500 ms
        self.req_id = EMPTY_INT                 # request id counter
        self.connection_status = False          # connection status
        self.symbol_exchange_dict = {}          # tdx symbol -> vn.py exchange
        self.symbol_market_dict = {}            # tdx symbol -> tdx market id
        self.symbol_vn_dict = {}                # tdx symbol -> vtSymbol
        self.symbol_tick_dict = {}              # tdx symbol -> last tick
        self.registed_symbol_set = set()
        self.pool = None                        # thread pool
        self.best_ip = None                     # e.g. '180.153.39.51'
        self.api_dict = {}                      # API connection/session objects, keyed by index
        self.last_tick_dt = {}                  # last tick time per session
self.instrument_count = 50000
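
    # Minimal usage sketch (an assumption about how the gateway drives this class,
    # not part of the original code):
    #
    #   md = TdxMdApi(gateway)
    #   md.connect(n=3)              # open 3 pooled TDX sessions
    #   md.subscribe(subscribeReq)   # register a symbol; run() workers start polling it
    #   md.checkStatus()             # called periodically to restart stale sessions
    #   md.close()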
def connect(self,n=3):
"""
        Connect to the TDX stock market data servers
:param n:
:return:
"""
if self.connection_status:
            for api in self.api_dict.values():
                if api is not None and getattr(api, "client", None) is not None:
self.writeLog(u'当前已经连接,不需要重新连接')
return
self.writeLog(u'开始选择通达信股票行情服务器')
        # Pick the best server
from pytdx.util.best_ip import select_best_ip
if self.best_ip is None:
self.best_ip = select_best_ip()
print(u'best_ip:{}'.format(self.best_ip))
        # Create n API connection instances
for i in range(n):
try:
api = TdxHq_API( heartbeat=True, auto_retry=True,raise_exception=True)
api.connect(self.best_ip, 7709)
                # Probe the connection by querying the market security count
                c = api.get_security_count(0)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip,7709)
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = -1
err.errorMsg = err_msg
self.gateway.onError(err)
else:
self.writeLog(u'创建第{}个tdx股票连接'.format(i+1))
self.api_dict[i] = api
self.last_tick_dt[i] = datetime.now()
self.connection_status = True
self.instrument_count = c
except Exception as ex:
self.writeError(u'连接服务器tdx[{}]异常:{},{}'.format(i,str(ex),traceback.format_exc()))
return
        # Update symbol_exchange_dict and symbol_market_dict
self.qryInstrument()
#self.req_thread = Thread(target=self.addReq)
#self.req_thread.start()
        # Create the connection pool; each worker runs the run() method
self.pool = Pool(n)
self.pool.map_async(self.run,range(n))
def reconnect(self,i):
"""
        Reconnect session i
:param i:
:return:
"""
try:
self.writeLog(u'重新选择通达信股票行情服务器')
            # Pick the best server again
from pytdx.util.best_ip import select_best_ip
self.best_ip = select_best_ip()
print(u'best_ip:{}'.format(self.best_ip))
api = TdxHq_API(heartbeat=True, auto_retry=True)
            api.connect(self.best_ip, 7709)
            # Probe the connection by querying the market security count
c = api.get_security_count(0)
if c is None or c < 10:
err_msg = u'该服务器IP {}无响应'.format(self.best_ip)
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = -1
err.errorMsg = err_msg
self.gateway.onError(err)
else:
self.writeLog(u'重新创建第{}个tdx股票连接'.format(i + 1))
self.api_dict[i] = api
sleep(1)
except Exception as ex:
self.writeError(u'重新连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
def close(self):
"""退出API"""
self.connection_status = False
if self.pool is not None:
self.pool.close()
self.pool.join()
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(subscribeReq.symbol)
self.writeLog(u'通达信行情订阅 {}'.format(str(vn_symbol)))
if len(vn_symbol)==0:
return
tdx_code = vn_symbol.split('.')[0]
self.writeLog(u'{}=>{}'.format(vn_symbol,tdx_code))
self.symbol_vn_dict[tdx_code] = vn_symbol
if tdx_code not in self.registed_symbol_set:
self.registed_symbol_set.add(tdx_code)
self.checkStatus()
def checkStatus(self):
#self.writeLog(u'检查tdx接口状态')
if len(self.registed_symbol_set) ==0:
return
        # If the connection has not been started, or any session has gone stale for over a minute, (re)start it
over_time = [((datetime.now()-dt).total_seconds() > 60) for dt in self.last_tick_dt.values()]
if not self.connection_status or len(self.api_dict) == 0 or any(over_time):
self.writeLog(u'tdx还没有启动连接,就启动连接')
self.close()
self.pool = None
self.api_dict = {}
            pool_count = getattr(self.gateway, 'tdx_pool_count', 3)
            self.connect(pool_count)
self.writeLog(u'tdx接口状态正常')
def qryInstrument(self):
"""
        Query/update stock contract information
:return:
"""
if not self.connection_status:
return
api = self.api_dict.get(0)
if api is None:
self.writeLog(u'取不到api连接,更新合约信息失败')
return
for market_id in tdxMarketMap.values():
exchange = tdxMarketMapReverse.get(market_id)
self.writeLog(u'查询{}的所有股票清单'.format(exchange))
start_no = 0
while(True):
                # Fetch a page of the security list
stocks = api.get_security_list(market_id,start_no)
stocks_len = len(stocks)
if stocks_len == 0:
break
start_no += stocks_len
if start_no > 15000:
break
for stock in stocks:
#OrderedDict([('code', '880643'),
# ('volunit', 100),
# ('decimal_point', 2),
# ('name', '食品加工'),
# ('pre_close', 2158.570068359375)]),
tdx_symbol = stock.get('code', None)
self.symbol_exchange_dict[tdx_symbol] = exchange
self.writeLog(u'{}下一共:{}个代码'.format(exchange,start_no))
def run(self, i):
"""
        Version 1: each pool thread runs continuously and fetches one request at a time from a queue.
        Version 2: each pool thread scans the subscribed symbol set and sends requests for the symbols whose index satisfies idx % api_count == i.
:param i:
:return:
"""
"""
        # Version 1
while self.connection_status:
try:
req = self.queue.get(timeout=self.req_interval)
self.processReq(req,i)
except Exception as ex:
self.writeLog(u'tdx[{}] exception:{},{}'.format(i,str(ex),traceback.format_exc()))
"""
        # Version 2:
try:
api_count = len(self.api_dict)
last_dt = datetime.now()
self.writeLog(u'开始运行tdx[{}],{}'.format(i,last_dt))
while self.connection_status:
symbols = set()
for idx,tdx_symbol in enumerate(list(self.registed_symbol_set)):
#self.writeLog(u'tdx[{}], api_count:{}, idx:{}, tdx_symbol:{}'.format(i, api_count, idx, tdx_symbol))
if idx % api_count == i:
try:
symbols.add(tdx_symbol)
self.processReq(tdx_symbol, i)
except BrokenPipeError as bex:
self.writeError(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex),i))
self.reconnect(i)
sleep(5)
break
except Exception as ex:
self.writeError(u'tdx[{}] exception:{},{}'.format(i, str(ex), traceback.format_exc()))
#api = self.api_dict.get(i,None)
#if api is None or getattr(api,'client') is None:
self.writeError(u'重试重连tdx[{}]'.format(i))
print(u'重试重连tdx[{}]'.format(i),file=sys.stderr)
self.reconnect(i)
#self.writeLog(u'tdx[{}] sleep'.format(i))
sleep(self.req_interval)
dt = datetime.now()
if last_dt.minute != dt.minute:
self.writeLog('tdx[{}] check point. {}, process symbols:{}'.format(i,dt,symbols))
last_dt = dt
except Exception as ex:
self.writeError(u'tdx[{}] pool.run exception:{},{}'.format(i, str(ex), traceback.format_exc()))
self.writeError(u'tdx[{}] {}退出'.format(i,datetime.now()))
def __select_market_code(self,code):
code = str(code)
if len(code)<3:
self.writeLog(u'代码{}不足长度:3'.format(code))
return 0
if code[0] in ['5', '6', '9'] or code[:3] in ["009", "126", "110", "201", "202", "203", "204"]:
return 1
return 0
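    # Examples of the heuristic above (assuming market id 1 = Shanghai, 0 = Shenzhen):
    #   '600000' -> 1, '510050' -> 1, '000001' -> 0, '300750' -> 0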
def processReq(self, req, i):
"""
        Process a quote/tick request for one symbol
:param req:
:param i:
:return:
"""
symbol = req
api = self.api_dict.get(i, None)
if api is None:
self.writeLog(u'tdx[{}] Api is None'.format(i))
raise Exception(u'tdx[{}] Api is None'.format(i))
market_id = self.symbol_market_dict.get(symbol,self.__select_market_code(symbol))
#self.writeLog(u'tdx[{}] get_security_quotes:({},{})'.format(i,market_id, symbol))
rt_list = []
try:
rt_list = api.get_security_quotes([(market_id,symbol)])
except Exception as ex:
self.writeLog(u'获取行情异常:{}'.format(str(ex)))
if len(rt_list) == 0:
self.writeLog(u'tdx[{}]: rt_list为空'.format(i))
return
#else:
# self.writeLog(u'tdx[{}]: rt_list数据:{}'.format(i, rt_list))
if i in self.last_tick_dt:
self.last_tick_dt[i] = datetime.now()
for d in list(rt_list):
            # Skip invalid ticks whose current volume is 0
if d.get('cur_vol', 0) <= 0:
self.writeLog(u'忽略成交量为0的无效单合约tick数据:{}'.format(d))
continue
code = d.get('code',None)
if symbol != code and code is not None:
#self.writeLog(u'忽略合约{} {} 不一致的tick数据:{}'.format(symbol,d.get('code'),rt_list))
#continue
symbol = code
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = self.symbol_vn_dict.get(symbol,symbol)
tick.symbol = tick.symbol.upper()
exchange = self.symbol_exchange_dict.get(symbol)
if exchange is None:
market_id = self.__select_market_code(symbol)
exchange = tdxMarketMapReverse.get(market_id)
self.symbol_exchange_dict.update({symbol:exchange})
tick.exchange = exchange
tick.vtSymbol = tick.symbol
tick.preClosePrice = d.get('last_close')
tick.highPrice = d.get('high')
tick.openPrice = d.get('open')
tick.lowPrice = d.get('low')
tick.lastPrice = d.get('price')
tick.volume = d.get('vol',0)
tick.lastVolume = d.get('cur_vol',0)
tick.datetime = datetime.now()
            # Fix up the milliseconds
last_tick = self.symbol_tick_dict.get(symbol,None)
if (last_tick is not None) and tick.datetime.replace(microsecond=0) == last_tick.datetime:
                # Same second as the previous tick (ignoring milliseconds), so stamp this one at 500 ms
tick.datetime = tick.datetime.replace(microsecond=500)
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
else:
tick.datetime = tick.datetime.replace(microsecond=0)
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
tick.date = tick.datetime.strftime('%Y-%m-%d')
tick.tradingDay = tick.date
            # Indexes have no price limits, so use +/-10% of the previous close as up/down limits
tick.upperLimit = tick.preClosePrice * 1.1
tick.lowerLimit = tick.preClosePrice * 0.9
            # Five levels of market depth
tick.bidPrice1 = d.get('bid1')
tick.bidVolume1 = d.get('bid_vol1')
tick.bidPrice2 = d.get('bid2')
tick.bidVolume2 = d.get('bid_vol2')
tick.bidPrice3 = d.get('bid3')
tick.bidVolume3 = d.get('bid_vol3')
tick.bidPrice4 = d.get('bid4')
tick.bidVolume4 = d.get('bid_vol4')
tick.bidPrice5 = d.get('bid5')
tick.bidVolume5 = d.get('bid_vol5')
tick.askPrice1 = d.get('ask1')
tick.askVolume1 = d.get('ask_vol1')
tick.askPrice2 = d.get('ask2')
            tick.askVolume2 = d.get('ask_vol2')
tick.askPrice3 = d.get('ask3')
tick.askVolume3 = d.get('ask_vol3')
tick.askPrice4 = d.get('ask4')
tick.askVolume4 = d.get('ask_vol4')
tick.askPrice5 = d.get('ask5')
tick.askVolume5 = d.get('ask_vol5')
if tick.datetime.hour not in [9,10,11,13,14]:
return
if tick.datetime.hour == 9 and tick.datetime.minute < 15:
return
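            # The two checks above appear to drop ticks outside A-share trading hours
            # (roughly 09:15-11:30 and 13:00-15:00); note that hour 15 itself is not whitelisted.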
if self.gateway.debug_display_tick:
self.writeLog('tdx:{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(tick.gatewayName, tick.symbol,
tick.exchange, tick.vtSymbol,
tick.datetime, tick.tradingDay,
tick.openPrice, tick.highPrice,
tick.lowPrice, tick.preClosePrice,
tick.bidPrice1,
tick.bidVolume1, tick.askPrice1,
tick.askVolume1))
self.gateway.writeLog(
'ask2:{},a_vol2:{},bid2:{},b_vol2:{}'.format(tick.askPrice2, tick.askVolume2, tick.bidPrice2,
tick.bidVolume2))
self.symbol_tick_dict[symbol] = tick
self.gateway.onTick(tick)
            # Push the bar / K-line update
if tick.vtSymbol in self.gateway.klines:
kline = self.gateway.klines.get(tick.vtSymbol)
kline.updateTick(copy.copy(tick))
# ----------------------------------------------------------------------
def writeLog(self, content):
"""发出日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
def writeError(self,content):
self.gateway.writeError(content)
|
multiobj_ops_threads.py
|
import time
class SomeClass(object):
def __init__(self):
self.c=0
    def do(self):
        for i in range(100):
            time.sleep(0.01)  # sleeping gives the OS a chance to switch to another thread
            self.c = self.c + 1
        print(self.c)
import threading
threads = []
for _ in range(100):
    x = SomeClass()
    t = threading.Thread(target=x.do)
    t.start()
    threads.append(t)  # keep a reference so the join loop below actually waits
for th in threads:
    th.join()
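
# A contrasting sketch (not in the original): if all the threads shared one SomeClass
# instance, the unsynchronised read-modify-write on self.c could lose updates, so a
# lock would be needed, e.g.:
#
#   lock = threading.Lock()
#   shared = SomeClass()
#   def locked_do():
#       for i in range(100):
#           time.sleep(0.01)
#           with lock:
#               shared.c = shared.c + 1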
|
smoke-tests.py
|
"""Basic tests to ensure all the browsers are launchable with the COM server
"""
import sys, os, unittest, threading, time, multiprocessing, BaseHTTPServer
from win32com import client #http://sourceforge.net/projects/pywin32/files/pywin32/
SERVER_ADDRESS = ('127.0.0.1', 9393)
SERVER_PAGE = """
<html>
<head>
<title>Title</title>
</head>
<body>
<a id="link">Test page</a>
</body>
</html>
"""
class Suite(unittest.TestCase):
def test_list(self):
lst = CreateObject("Selenium.List")
for i in range(0, 10):
lst.add(i)
self.assertEqual(10, lst.Count)
def test_firefox(self):
self.assert_browser_display_page("Selenium.FirefoxDriver")
def test_iedriver(self):
self.assert_browser_display_page("Selenium.IEDriver")
def test_chrome(self):
self.assert_browser_display_page("Selenium.ChromeDriver")
def test_opera(self):
self.assert_browser_display_page("Selenium.OperaDriver")
def test_phantomjs(self):
self.assert_browser_display_page("Selenium.PhantomJSDriver")
def assert_browser_display_page(self, progid):
driver = CreateObject(progid)
try:
driver.get("http://%s:%s" % SERVER_ADDRESS)
txt = driver.FindElementById('link').Text
self.assertEqual("Test page", txt)
finally:
            driver.quit()
def CreateObject(progid):
return client.Dispatch(progid)
def RunHTTPServer():
server = BaseHTTPServer.HTTPServer(SERVER_ADDRESS, HTTPServerHandler)
server.serve_forever()
class HTTPServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def handle(self):
try:
return BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
except: return
def log_message(self, format, *args):
return
def do_GET(s):
s.send_response(200)
s.send_header('Content-type', 'text/html')
s.end_headers()
s.wfile.write(SERVER_PAGE)
if __name__ == '__main__':
print __doc__
print "Start tests ...\n"
server = multiprocessing.Process(target=RunHTTPServer)
server.start()
try:
unittest.main()
except SystemExit: pass
|
bridge_dino_carla.py
|
#!/usr/bin/env python3
# type: ignore
import time
import math
import atexit
import numpy as np
import threading
import random
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper
from lib.helpers import FakeSteeringWheel
STEER_RATIO = 25.
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--autopilot', action='store_true')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--realmonitoring', action='store_true')
# --long_test creates a vehicle in front in order to test longitudinal control
parser.add_argument('--long_test', action='store_true')
# --hil enables human control of the Carla vehicle by syncing dyno and carla speeds.
# Lateral control is managed by Carla's autopilot
parser.add_argument('--hil', action='store_true')
args = parser.parse_args()
#pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
# We only want to send Carla frames to Openpilot
pm = messaging.PubMaster(['frame'])
W, H = 1164, 874
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
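# Example: steer_rate_limit(10.0, 12.0) returns 10.5, while steer_rate_limit(10.0, 9.8)
# is within the 0.5 degree limit and returns 9.8 unchanged.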
def cam_callback(image):
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
dat = messaging.new_message('frame')
dat.frame = {
"frameId": image.frame,
"image": img.tostring(),
"transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
}
pm.send('frame', dat)
def imu_callback(imu):
#print(imu, imu.accelerometer)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def health_function():
pm = messaging.PubMaster(['health'])
health_sock = messaging.pub_sock('health')
rk = Ratekeeper(1.0)
while 1:
dat = messaging.new_message('health')
dat.valid = True
dat.health = {
'ignitionLine': True,
'ignition_can': True,
'hwType': "greyPanda",
'controlsAllowed': True
}
pm.send('health', dat)
rk.keep_time()
def fake_driver_monitoring():
if args.realmonitoring:
return
pm = messaging.PubMaster(['driverState'])
while 1:
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
time.sleep(0.1)
def go():
# health_function and fake_driver_monitoring are only needed if there is no Panda connected
#threading.Thread(target=health_function).start()
#threading.Thread(target=fake_driver_monitoring).start()
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(5.0)
world = client.load_world('Town04')
settings = world.get_settings()
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)
weather = carla.WeatherParameters(
cloudiness=0.1,
precipitation=0.0,
precipitation_deposits=0.0,
wind_intensity=0.0,
sun_azimuth_angle=15.0,
sun_altitude_angle=75.0)
world.set_weather(weather)
blueprint_library = world.get_blueprint_library()
# for blueprint in blueprint_library.filter('sensor.*'):
# print(blueprint.id)
# exit(0)
world_map = world.get_map()
vehicle_bp = random.choice(blueprint_library.filter('vehicle.tesla.model3'))
    vehicle = world.spawn_actor(vehicle_bp, world_map.get_spawn_points()[16])  # Spawn point 16; point 283 (used for --long_test) is directly in front of it for longitudinal control
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
if args.long_test:
tm = client.get_trafficmanager()
tm_port = tm.get_port()
vehicle_test_bp = random.choice(blueprint_library.filter('vehicle.tesla.model3'))
vehicle_test = world.spawn_actor(vehicle_test_bp, world_map.get_spawn_points()[283]) # Point 283 is right in front for long control
vehicle_test.apply_physics_control(physics_control)
vehicle_test.set_autopilot(True, tm_port)
tm.vehicle_percentage_speed_difference(vehicle_test, -10)
if args.hil:
if not args.long_test:
tm = client.get_trafficmanager()
tm_port = tm.get_port()
vehicle.set_autopilot(True, tm_port)
tm.ignore_lights_percentage(vehicle, 100)
tm.distance_to_leading_vehicle(vehicle, 0)
if args.autopilot:
vehicle.set_autopilot(True)
# print(vehicle.get_speed_limit())
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.45))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
    # The IMU sensor is created below but its callback is left disabled; uncomment imu.listen to re-enable it
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
#imu.listen(imu_callback)
def destroy():
print("clean exit")
imu.destroy()
camera.destroy()
vehicle.destroy()
print("done")
atexit.register(destroy)
# controls loop
getcontrols = messaging.SubMaster(['carControl', 'carState','controlsState'])
carla_state = messaging.PubMaster(['carlaState'])
rk = Ratekeeper(100, print_delay_threshold=0.05)
# init
#A_throttle = 2.
#A_brake = 2.
A_steer_torque = 1.
fake_wheel = FakeSteeringWheel()
is_openpilot_engaged = False
in_reverse = False
throttle_out = 0
brake_out = 0
steer_out = steer_op = 0.0
old_steer = steer_out
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
while 1:
cruise_button = 0
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) * 3.6
getcontrols.update(0)
#print('sendcan update')
# The angle of the Carla vehicle is sent to controlsd as the steering angle of the vehicle
angle_carla = messaging.new_message('carlaState')
angle_carla.carlaState = {"angle": steer_op}
carla_state.send('carlaState', angle_carla)
# Get controls from Openpilot
throttle_op = getcontrols['carControl'].actuators.gas # [0,1]
brake_op = getcontrols['carControl'].actuators.brake # [0,1]
vel_dino = getcontrols['carState'].vEgo #mps
steer_op = getcontrols['controlsState'].angleSteersDes # degrees [-180,180]
#print("steer_op = {}".format(steer_out))
steer_out = steer_op
#steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1, 1)
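        # steer_carla maps the desired steering-wheel angle (degrees) to Carla's
        # normalised [-1, 1] input: dividing by STEER_RATIO gives a road-wheel angle,
        # dividing by max_steer_angle normalises it, and the -1 flips the sign
        # convention between openpilot and Carla.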
#print("vel_dino = {}".format(vel_dino*2.2))
        # openpilot reports vEgo in m/s; Carla velocities are also in m/s, but `speed`
        # above was converted to km/h, so convert it back before comparing
        if abs(vel_dino - speed / 3.6) > 0.1:
# Get the coordinates of a vector that points in the direction the vehicle is going and project the speed in that direction
fwd = vehicle.get_transform().rotation.get_forward_vector()
vehicle.set_velocity(carla.Vector3D(vel_dino * fwd.x,
vel_dino * fwd.y, vel_dino * fwd.z))
#vel = vehicle.get_velocity()
#speed = math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)
#print("carla_speed = {}".format(speed*2.2))
#print("steer_carla = {}".format(steer_carla))
throttle_out = throttle_op/0.6
brake_out = brake_op
#steer_angle_out = fake_wheel.angle
# print(steer_torque_op)
# print(steer_angle_out)
#vc = carla.VehicleControl(throttle=throttle_out, steer=steer_angle_out / 3.14, brake=brake_out, reverse=in_reverse)
vc.throttle = throttle_out
#print('Throttle_Carla = {}'.format(throttle_out))
if throttle_out != 0.0 or brake_out != 0.0:
vc.steer = steer_carla
else:
vc.steer = 0
vc.brake = brake_out
# Openpilot controls are only applied if we're not running with human control
if not args.hil:
vehicle.apply_control(vc)
elif args.hil:
fwd = vehicle.get_transform().rotation.get_forward_vector()
vehicle.set_velocity(carla.Vector3D(vel_dino * fwd.x,
vel_dino * fwd.y, vel_dino * fwd.z))
# Code below changes the speed of the vehicle in front to whatever value we want
#fwd_test = vehicle_test.get_transform().rotation.get_forward_vector()
#vel_test = vehicle_test.get_velocity()
#speed_test = math.sqrt(vel_test.x ** 2 + vel_test.y ** 2 + vel_test.z ** 2) * 3.6
# vehicle_test.set_velocity(carla.Vector3D(vel_dino * fwd_test.x,
# vel_dino * fwd_test.y, vel_dino * fwd_test.z))
#print("speed_test = {}".format(speed_test*0.62))
rk.keep_time()
if __name__ == "__main__":
params = Params()
params.delete("Offroad_ConnectivityNeeded")
from selfdrive.version import terms_version, training_version
params.put("HasAcceptedTerms", terms_version)
params.put("CompletedTrainingVersion", training_version)
params.put("CommunityFeaturesToggle", "1")
params.put("CalibrationParams", '{"vanishing_point": [582.06, 442.78], "valid_blocks": 20, "calib_radians":[0, -0.0036804510179076896, -0.001153260986851604]}')
    # If CARLA cannot be imported, keep the process alive instead of crashing
try:
import carla
except ImportError:
print("WARNING: NO CARLA")
while 1:
time.sleep(1)
from multiprocessing import Process, Queue
#q = Queue()
#p = Process(target=go)
#p.daemon = True
#p.start()
go()
# We don't want to control the Carla vehicle with the keyboard so the lines below are commented out
#if args.joystick:
# start input poll for joystick
# from lib.manual_ctrl import wheel_poll_thread
# wheel_poll_thread(q)
#else:
# start input poll for keyboard
# from lib.keyboard_ctrl import keyboard_poll_thread
# keyboard_poll_thread(q)
|
row_mat_bit_no_rec.py
|
#!/usr/bin/python
#
# This file is part of PyRQA.
# Copyright 2015 Tobias Rawald, Mike Sips.
"""
RQA, Fixed Radius, OpenCL, RowMatBitNoRec
"""
import numpy as np
import os
import pyopencl as cl
import threading
import Queue
from ....abstract_classes import AbstractRunnable
from ....opencl import OpenCL
from ....processing_order import Diagonal
from ....recurrence_analysis import RQASubMatricesCarryover
from ....result import RQAResult
from ....runtimes import Runtimes
class RowMatBitNoRec(RQASubMatricesCarryover, AbstractRunnable):
"""
Input Data Representation: Column-Store
Similarity Value Materialisation: Yes
Similarity Value Representation: Byte
Intermediate Results Recycling: No
"""
def __init__(self,
settings,
opencl=None,
verbose=False,
command_line=False,
edge_length=10240,
processing_order=Diagonal,
optimisations_enabled=False,
data_type=np.uint32):
RQASubMatricesCarryover.__init__(self, settings, verbose, edge_length, processing_order)
self.opencl = opencl
self.command_line = command_line
self.optimisations_enabled = optimisations_enabled
self.data_type = data_type
self.__initialise()
def __initialise(self):
self.validate_opencl()
self.data_size = np.dtype(self.data_type).itemsize * 8
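        # data_size is the number of bits per element of the chosen data type; the
        # recurrence matrix is bit-packed, one recurrence point per bit.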
self.threads_runtimes = {}
self.threads_diagonal_frequency_distribution = {}
self.threads_vertical_frequency_distribution = {}
self.threads_white_vertical_frequency_distribution = {}
for device in self.opencl.devices:
self.threads_runtimes[device] = Runtimes()
self.threads_diagonal_frequency_distribution[device] = self.get_emtpy_global_frequency_distribution()
self.threads_vertical_frequency_distribution[device] = self.get_emtpy_global_frequency_distribution()
self.threads_white_vertical_frequency_distribution[device] = self.get_emtpy_global_frequency_distribution()
def reset(self):
RQASubMatricesCarryover.reset(self)
self.__initialise()
def validate_opencl(self):
if not self.opencl:
self.opencl = OpenCL(verbose=self.verbose,
command_line=self.command_line,
optimisations_enabled=self.optimisations_enabled)
if not self.opencl.programs_created:
self.opencl.create_programs(kernel_file_names=self.settings.get_kernel_file_names(self),
similarity_measure_name=self.settings.similarity_measure.name,
leaf_path=os.path.dirname(os.path.abspath(__file__)),
root_path=self.settings.base_path)
def process_sub_matrix(self, *args, **kwargs):
device = kwargs['device']
sub_matrix_queue = kwargs['sub_matrix_queue']
context = self.opencl.contexts[device]
command_queue = self.opencl.command_queues[device]
program = self.opencl.programs[device]
create_matrix_kernel = cl.Kernel(program, 'create_matrix')
vertical_kernel = cl.Kernel(program, 'vertical')
diagonal_kernel = cl.Kernel(program, self.settings.diagonal_kernel_name)
clear_buffer_kernel = cl.Kernel(program, 'clear_buffer')
while True:
try:
sub_matrix = sub_matrix_queue.get(False)
transfer_from_device_events = []
transfer_to_device_events = []
create_matrix_events = []
vertical_events = []
diagonal_events = []
# Vectors X
vectors_x = self.get_vectors_x(sub_matrix)
vectors_x_buffer = cl.Buffer(context,
cl.mem_flags.READ_ONLY,
vectors_x.size * vectors_x.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vectors_x_buffer,
vectors_x,
device_offset=0,
wait_for=None,
is_blocking=False))
# Vectors Y
vectors_y = self.get_vectors_y(sub_matrix)
vectors_y_buffer = cl.Buffer(context,
cl.mem_flags.READ_ONLY,
vectors_y.size * vectors_y.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vectors_y_buffer,
vectors_y,
device_offset=0,
wait_for=None,
is_blocking=False))
# Recurrence matrix
# matrix = self.get_bit_matrix(sub_matrix, self.data_type)
# matrix_buffer = cl.Buffer(context, cl.mem_flags.READ_WRITE, matrix.size * matrix.itemsize)
# transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue, matrix_buffer, matrix, device_offset=0, wait_for=None, is_blocking=False))
matrix_size, matrix_elements = self.get_bit_matrix_size(sub_matrix,
self.data_type)
matrix = np.zeros(1,
dtype=self.data_type)
matrix_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
int(matrix_size))
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
matrix_buffer,
matrix,
device_offset=0,
wait_for=None,
is_blocking=False))
# Recurrence points
recurrence_points, \
recurrence_points_start, \
recurrence_points_end = self.get_recurrence_points(sub_matrix)
recurrence_points_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
recurrence_points.size * recurrence_points.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
recurrence_points_buffer,
recurrence_points,
device_offset=0,
wait_for=None,
is_blocking=False))
# Vertical frequency distribution
vertical_frequency_distribution = self.get_empty_local_frequency_distribution()
vertical_frequency_distribution_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
vertical_frequency_distribution.size * vertical_frequency_distribution.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vertical_frequency_distribution_buffer,
vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
# White vertical frequency distribution
white_vertical_frequency_distribution = self.get_empty_local_frequency_distribution()
white_vertical_frequency_distribution_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
white_vertical_frequency_distribution.size * white_vertical_frequency_distribution.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
white_vertical_frequency_distribution_buffer,
white_vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
# Diagonal frequency distribution
diagonal_frequency_distribution = self.get_empty_local_frequency_distribution()
diagonal_frequency_distribution_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
diagonal_frequency_distribution.size * diagonal_frequency_distribution.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
diagonal_frequency_distribution_buffer,
diagonal_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
# Vertical carryover
vertical_carryover, \
vertical_carryover_start,\
vertical_carryover_end = self.get_vertical_length_carryover(sub_matrix)
vertical_carryover_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
vertical_carryover.size * vertical_carryover.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vertical_carryover_buffer,
vertical_carryover,
device_offset=0,
wait_for=None,
is_blocking=False))
# White vertical carryover
white_vertical_carryover, \
white_vertical_carryover_start,\
white_vertical_carryover_end = self.get_white_vertical_length_carryover(sub_matrix)
white_vertical_carryover_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
white_vertical_carryover.size * white_vertical_carryover.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
white_vertical_carryover_buffer,
white_vertical_carryover,
device_offset=0,
wait_for=None,
is_blocking=False))
# Diagonal carryover
diagonal_carryover, \
diagonal_carryover_start, \
diagonal_carryover_end = self.get_diagonal_length_carryover(sub_matrix)
diagonal_carryover_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
diagonal_carryover.size * diagonal_carryover.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
diagonal_carryover_buffer,
diagonal_carryover,
device_offset=0,
wait_for=None,
is_blocking=False))
command_queue.finish()
# Clear buffer kernel
clear_buffer_args = [matrix_buffer]
OpenCL.set_kernel_args(clear_buffer_kernel,
clear_buffer_args)
global_work_size = [int(matrix_elements)]
local_work_size = None
create_matrix_events.append(cl.enqueue_nd_range_kernel(command_queue,
clear_buffer_kernel,
global_work_size,
local_work_size))
command_queue.finish()
# Create matrix kernel
create_matrix_args = [vectors_x_buffer,
vectors_y_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(self.settings.embedding_dimension),
np.uint32(self.settings.time_delay),
np.float32(self.settings.neighbourhood.radius),
np.uint32(self.data_size),
matrix_buffer]
OpenCL.set_kernel_args(create_matrix_kernel, create_matrix_args)
global_work_size = [int(sub_matrix.dim_x + (device.max_work_group_size - (sub_matrix.dim_x % device.max_work_group_size))),
int(sub_matrix.dim_y)]
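                # The global work size is rounded up to a multiple of the device's
                # max work-group size (presumably so the kernels see a full NDRange).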
local_work_size = None
create_matrix_events.append(cl.enqueue_nd_range_kernel(command_queue,
create_matrix_kernel,
global_work_size,
local_work_size))
command_queue.finish()
# Vertical kernel
vertical_args = [matrix_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(self.data_size),
recurrence_points_buffer,
vertical_frequency_distribution_buffer,
vertical_carryover_buffer,
white_vertical_frequency_distribution_buffer,
white_vertical_carryover_buffer]
OpenCL.set_kernel_args(vertical_kernel,
vertical_args)
global_work_size = [int(sub_matrix.dim_x + (device.max_work_group_size - (sub_matrix.dim_x % device.max_work_group_size)))]
local_work_size = None
vertical_events.append(cl.enqueue_nd_range_kernel(command_queue,
vertical_kernel,
global_work_size,
local_work_size))
command_queue.finish()
# Diagonal kernel
if self.settings.is_matrix_symmetric:
diagonal_args = [matrix_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(sub_matrix.start_x),
np.uint32(sub_matrix.start_y),
np.uint32(self.settings.theiler_corrector),
np.uint32(self.data_size),
np.uint32(self.get_diagonal_offset(sub_matrix)),
diagonal_frequency_distribution_buffer,
diagonal_carryover_buffer]
global_work_size = [int(sub_matrix.dim_x + (device.max_work_group_size - (sub_matrix.dim_x % device.max_work_group_size)))]
else:
diagonal_args = [matrix_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(sub_matrix.dim_x + sub_matrix.dim_y - 1),
np.uint32(sub_matrix.start_x),
np.uint32(sub_matrix.start_y),
np.uint32(self.settings.theiler_corrector),
np.uint32(self.data_size),
diagonal_frequency_distribution_buffer,
diagonal_carryover_buffer]
global_work_size_x = sub_matrix.dim_x + sub_matrix.dim_y - 1
global_work_size = [int(global_work_size_x + (device.max_work_group_size - (global_work_size_x % device.max_work_group_size)))]
OpenCL.set_kernel_args(diagonal_kernel,
diagonal_args)
local_work_size = None
diagonal_events.append(cl.enqueue_nd_range_kernel(command_queue,
diagonal_kernel,
global_work_size,
local_work_size))
command_queue.finish()
# Read buffer
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
recurrence_points_buffer,
self.recurrence_points[recurrence_points_start:recurrence_points_end],
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
vertical_frequency_distribution_buffer,
vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
vertical_carryover_buffer,
self.vertical_length_carryover[vertical_carryover_start:vertical_carryover_end],
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
white_vertical_frequency_distribution_buffer,
white_vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
white_vertical_carryover_buffer,
self.white_vertical_length_carryover[white_vertical_carryover_start:white_vertical_carryover_end],
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
diagonal_frequency_distribution_buffer,
diagonal_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
diagonal_carryover_buffer,
self.diagonal_length_carryover[diagonal_carryover_start:diagonal_carryover_end],
device_offset=0,
wait_for=None,
is_blocking=False))
command_queue.finish()
# Update frequency distributions
self.threads_vertical_frequency_distribution[device] += vertical_frequency_distribution
self.threads_white_vertical_frequency_distribution[device] += white_vertical_frequency_distribution
self.threads_diagonal_frequency_distribution[device] += diagonal_frequency_distribution
# Get events runtimes
runtimes = Runtimes()
runtimes.transfer_to_device = self.opencl.convert_events_runtime(transfer_to_device_events)
runtimes.transfer_from_device = self.opencl.convert_events_runtime(transfer_from_device_events)
runtimes.create_matrix = self.opencl.convert_events_runtime(create_matrix_events)
runtimes.detect_vertical_lines = self.opencl.convert_events_runtime(vertical_events)
runtimes.detect_diagonal_lines = self.opencl.convert_events_runtime(diagonal_events)
self.threads_runtimes[device] += runtimes
except Queue.Empty:
break
def run_single_device(self):
for sub_matrix_queue in self.sub_matrix_queues:
self.process_sub_matrix(device=self.opencl.devices[0],
sub_matrix_queue=sub_matrix_queue)
def run_multiple_devices(self):
for sub_matrix_queue in self.sub_matrix_queues:
threads = []
for device in self.opencl.devices:
kwargs = {'device': device,
'sub_matrix_queue': sub_matrix_queue}
thread = threading.Thread(group=None, target=self.process_sub_matrix, name=None, args=(), kwargs=kwargs)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def run(self):
self.reset()
runtimes = Runtimes()
if len(self.opencl.devices) == 0:
print 'No device specified!'
return 0
elif len(self.opencl.devices) == 1:
self.run_single_device()
elif len(self.opencl.devices) > 1:
self.run_multiple_devices()
self.post_process_length_carryovers()
for device in self.opencl.devices:
runtimes += self.threads_runtimes[device]
self.diagonal_frequency_distribution += self.threads_diagonal_frequency_distribution[device]
self.vertical_frequency_distribution += self.threads_vertical_frequency_distribution[device]
self.white_vertical_frequency_distribution += self.threads_white_vertical_frequency_distribution[device]
if self.settings.is_matrix_symmetric:
self.extent_diagonal_frequency_distribution()
result = RQAResult(self.settings,
runtimes,
recurrence_points=self.recurrence_points,
diagonal_frequency_distribution=self.diagonal_frequency_distribution,
vertical_frequency_distribution=self.vertical_frequency_distribution,
white_vertical_frequency_distribution=self.white_vertical_frequency_distribution)
return result
|
test_daemon_factory.py
|
import functools
import logging
import pprint
import re
import sys
import time
import attr
import psutil
import pytest
from pytestskipmarkers.utils import platform
from saltfactories.bases import Daemon
from saltfactories.exceptions import FactoryNotRunning
from saltfactories.exceptions import FactoryNotStarted
from saltfactories.utils.processes import _get_cmdline
PROCESS_START_TIMEOUT = 2
log = logging.getLogger(__name__)
def kill_children(procs): # pragma: no cover
_, alive = psutil.wait_procs(procs, timeout=3)
for p in alive:
p.kill()
def test_daemon_process_termination(request, tempfiles):
primary_childrend_count = 5
secondary_children_count = 3
script = tempfiles.makepyfile(
"""
#!{shebang}
# coding=utf-8
import time
import multiprocessing
def spin():
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def spin_children():
procs = []
for idx in range({secondary_children_count}):
proc = multiprocessing.Process(target=spin)
proc.daemon = True
proc.start()
procs.append(proc)
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def main():
procs = []
for idx in range({primary_childrend_count}):
proc = multiprocessing.Process(target=spin_children)
procs.append(proc)
proc.start()
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
# We're not terminating child processes on purpose. Our code should handle it.
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""".format(
shebang=sys.executable,
primary_childrend_count=primary_childrend_count,
secondary_children_count=secondary_children_count,
),
executable=True,
)
if not platform.is_windows():
factory_kwargs = dict(script_name=script)
else:
        # Windows doesn't know how to handle Python scripts directly
factory_kwargs = dict(script_name=sys.executable, base_script_args=[script])
daemon = Daemon(start_timeout=1, **factory_kwargs)
daemon.start()
daemon_pid = daemon.pid
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
# Allow the script to start
time.sleep(PROCESS_START_TIMEOUT)
assert psutil.pid_exists(daemon_pid)
proc = psutil.Process(daemon_pid)
children = proc.children(recursive=True)
request.addfinalizer(functools.partial(kill_children, children))
child_count = len(children)
expected_count = primary_childrend_count + (primary_childrend_count * secondary_children_count)
if platform.is_windows() and sys.version_info[:2] == (3, 7):
# Under Python 3.7 and Windows we always seem to get +1 child
# XXX: Don't forget to look what this extra child is
expected_count += 1
assert child_count == expected_count, "{}!={}\n{}".format(
child_count,
expected_count,
pprint.pformat([_get_cmdline(child) or child for child in children]),
)
daemon.terminate()
assert psutil.pid_exists(daemon_pid) is False
for child in list(children): # pragma: no cover
if psutil.pid_exists(child.pid):
continue
children.remove(child)
assert not children, "len(children)=={} != 0\n{}".format(
len(children), pprint.pformat([_get_cmdline(child) or child for child in children])
)
@pytest.mark.skip("Will debug later")
def test_daemon_process_termination_parent_killed(request, tempfiles):
primary_childrend_count = 5
secondary_children_count = 3
script = tempfiles.makepyfile(
"""
#!{shebang}
# coding=utf-8
import time
import multiprocessing
def spin():
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def spin_children():
procs = []
for idx in range({secondary_children_count}):
proc = multiprocessing.Process(target=spin)
proc.daemon = True
proc.start()
procs.append(proc)
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def main():
procs = []
for idx in range({primary_childrend_count}):
proc = multiprocessing.Process(target=spin_children)
procs.append(proc)
proc.start()
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
# We're not terminating child processes on purpose. Our code should handle it.
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""".format(
shebang=sys.executable,
primary_childrend_count=primary_childrend_count,
secondary_children_count=secondary_children_count,
),
executable=True,
)
if not platform.is_windows():
factory_kwargs = dict(script_name=script)
else:
        # Windows doesn't know how to handle Python scripts directly
factory_kwargs = dict(script_name=sys.executable, base_script_args=[script])
daemon = Daemon(start_timeout=1, **factory_kwargs)
daemon.start()
daemon_pid = daemon.pid
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
# Allow the script to start
time.sleep(PROCESS_START_TIMEOUT)
assert psutil.pid_exists(daemon_pid)
proc = psutil.Process(daemon_pid)
children = proc.children(recursive=True)
request.addfinalizer(functools.partial(kill_children, children))
assert len(children) == primary_childrend_count + (
primary_childrend_count * secondary_children_count
)
# Pretend the parent process died.
proc.kill()
time.sleep(0.5)
    # We should still be able to terminate all child processes
daemon.terminate()
assert psutil.pid_exists(daemon_pid) is False
psutil.wait_procs(children, timeout=3)
for child in list(children):
if psutil.pid_exists(child.pid):
continue
children.remove(child)
assert not children, "len(children)=={} != 0\n{}".format(
len(children), pprint.pformat(children)
)
@pytest.mark.parametrize("start_timeout", [0.1, 0.3])
def test_started_context_manager(request, tempfiles, start_timeout):
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
time.sleep(3)
sys.stdout.write("Done!\n")
sys.stdout.flush()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=2,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with pytest.raises(FactoryNotStarted) as exc:
daemon.start(start_timeout=start_timeout)
match = re.search(r"which took (?P<seconds>.*) seconds", str(exc.value))
assert match
# XXX: Revisit logic
# seconds = float(match.group("seconds"))
## Must take at least start_timeout to start
# assert seconds > start_timeout
## Should not take more than start_timeout + 0.3 to start and fail
# assert seconds < start_timeout + 0.3
# And using a context manager?
with pytest.raises(FactoryNotStarted) as exc:
started = None
with daemon.started(start_timeout=start_timeout):
# We should not even be able to set the following variable
started = False # pragma: no cover
assert started is None
match = re.search(r"which took (?P<seconds>.*) seconds", str(exc.value))
assert match
# XXX: Revisit logic
# seconds = float(match.group("seconds"))
## Must take at least start_timeout to start
# assert seconds > start_timeout
## Should not take more than start_timeout + 0.3 to start and fail
# assert seconds < start_timeout + 0.3
@pytest.fixture
def factory_stopped_script(tempfiles):
return tempfiles.makepyfile(
r"""
# coding=utf-8
import os
import sys
import time
import socket
import multiprocessing
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 12345))
sock.listen(5)
try:
while True:
connection, address = sock.accept()
connection.close()
except (KeyboardInterrupt, SystemExit):
pass
finally:
sock.close()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
def test_stopped_context_manager_raises_FactoryNotRunning(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with pytest.raises(FactoryNotRunning):
with daemon.stopped():
pass
def test_stopped_context_manager(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with daemon.started():
assert daemon.is_running()
with daemon.stopped():
assert daemon.is_running() is False
assert daemon.is_running()
@attr.s
class CallbackState:
daemon = attr.ib()
before_stop_callback_called = attr.ib(default=False)
after_stop_callback_called = attr.ib(default=False)
before_start_callback_called = attr.ib(default=False)
after_start_callback_called = attr.ib(default=False)
def before_stop_callback(self, daemon):
assert daemon is self.daemon
self.before_stop_callback_called = True
def after_stop_callback(self, daemon):
assert daemon is self.daemon
self.after_stop_callback_called = True
def before_start_callback(self, daemon):
assert daemon is self.daemon
self.before_start_callback_called = True
def after_start_callback(self, daemon):
assert daemon is self.daemon
self.after_start_callback_called = True
def test_stopped_context_manager_callbacks(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
daemon_started_once = False
with daemon.started():
daemon_started_once = daemon.is_running()
assert daemon_started_once is True
callbacks = CallbackState(daemon)
assert callbacks.before_stop_callback_called is False
assert callbacks.after_stop_callback_called is False
assert callbacks.before_start_callback_called is False
assert callbacks.after_start_callback_called is False
with daemon.stopped(
before_stop_callback=callbacks.before_stop_callback,
after_stop_callback=callbacks.after_stop_callback,
before_start_callback=callbacks.before_start_callback,
after_start_callback=callbacks.after_start_callback,
):
assert daemon.is_running() is False
assert callbacks.before_stop_callback_called is True
assert callbacks.after_stop_callback_called is True
assert callbacks.before_start_callback_called is False
assert callbacks.after_start_callback_called is False
assert daemon.is_running()
assert callbacks.before_stop_callback_called is True
assert callbacks.after_stop_callback_called is True
assert callbacks.before_start_callback_called is True
assert callbacks.after_start_callback_called is True
# Reset the callbacks state
callbacks.before_stop_callback_called = False
callbacks.after_stop_callback_called = False
callbacks.before_start_callback_called = False
callbacks.after_start_callback_called = False
    # Let's go through stopped again; the callbacks should not be called this time
    # because they are not passed into .stopped()
with daemon.stopped():
assert daemon.is_running() is False
assert daemon.is_running()
assert callbacks.before_stop_callback_called is False
assert callbacks.after_stop_callback_called is False
assert callbacks.before_start_callback_called is False
assert callbacks.after_start_callback_called is False
assert daemon_started_once is True
def test_context_manager_returns_class_instance(tempfiles):
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
sys.stdout.write("Done!\n")
sys.stdout.flush()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=1,
max_start_attempts=1,
)
# Without starting the factory
started = d = None
with pytest.raises(RuntimeError):
with daemon as d:
# We should not even be able to set the following variable
started = d.is_running() # pragma: no cover
assert d is None
assert started is None
# After starting the factory
started = False
daemon.start()
with daemon as d:
        # The daemon was started above, so is_running() should be True here
started = d.is_running()
assert d.is_running() is False
assert started is True
# By starting the factory and passing timeout directly
started = False
with daemon.started(start_timeout=1) as d:
        # started(start_timeout=1) starts the daemon, so is_running() should be True here
started = d.is_running()
assert d.is_running() is False
assert started is True
# By starting the factory without any keyword arguments
started = False
with daemon.started() as d:
        # started() with no arguments also starts the daemon, so is_running() should be True here
started = d.is_running()
assert d.is_running() is False
assert started is True
@pytest.mark.parametrize("max_start_attempts", [1, 2, 3])
def test_exact_max_start_attempts(tempfiles, caplog, max_start_attempts):
"""
This test asserts that we properly report max_start_attempts
"""
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
time.sleep(0.125)
sys.exit(1)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=0.1,
max_start_attempts=max_start_attempts,
check_ports=[12345],
)
with caplog.at_level(logging.INFO):
with pytest.raises(FactoryNotStarted) as exc:
daemon.start()
assert "confirm running status after {} attempts".format(max_start_attempts) in str(
exc.value
)
start_attempts = [
"Attempt: {} of {}".format(n, max_start_attempts) for n in range(1, max_start_attempts + 1)
]
for record in caplog.records:
if not record.message.startswith("Starting Daemon"):
continue
for idx, start_attempt in enumerate(list(start_attempts)):
if start_attempt in record.message:
start_attempts.pop(idx)
assert not start_attempts
|
ucomm_relay_server.py
|
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
# Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import multiprocessing
import Queue
import select
import socket
import SocketServer
import struct
import sys
import threading
import time
import gabriel
LOG = gabriel.logging.getLogger(__name__)
class UCommError(Exception):
pass
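# Relay wire format, as implied by UCommRelayHandler._handle_input_data()
# below: a 4-byte network-order total size (header + data), a 4-byte
# network-order header size, the JSON header bytes, then the result data.
# A sender on the ucomm side would therefore build a packet roughly like
# this (hypothetical sketch, not part of this module):
#
#     header = json.dumps({"control": "..."})
#     packet = struct.pack("!II", len(header) + len(data), len(header)) + header + data
#     sock.sendall(packet)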
class UCommRelayHandler(gabriel.network.CommonHandler):
'''
    The server-side handler for the connection with the ucomm module.
    It receives return messages and puts them into @result_queue;
    they are then picked up by the mobile result handler (in mobile_server.py) and sent to the mobile device.
'''
def setup(self):
super(UCommRelayHandler, self).setup()
def __repr__(self):
return "UCOMM Relay Handler"
def handle(self):
LOG.info("User communication module is connected")
super(UCommRelayHandler, self).handle()
def _handle_input_data(self):
rtn_size = struct.unpack("!I", self._recv_all(4))[0]
rtn_header_size = struct.unpack("!I", self._recv_all(4))[0]
rtn_header = self._recv_all(rtn_header_size)
rtn_data = self._recv_all(rtn_size-rtn_header_size)
gabriel.control.result_queue.put( (rtn_header, rtn_data) )
# control messages
rtn_header_json = json.loads(rtn_header)
message_control = rtn_header_json.get('control', None)
if message_control is not None:
message_control = str(message_control) # this will be unicode otherwise
gabriel.control.command_queue.put(message_control)
class UCommRelayServer(gabriel.network.CommonServer):
def __init__(self, port, handler):
gabriel.network.CommonServer.__init__(self, port, handler) # cannot use super because it's old style class
LOG.info("* UComm relay server(%s) configuration" % str(self.handler))
LOG.info(" - Open TCP Server at %s" % (str(self.server_address)))
LOG.info(" - Disable nagle (No TCP delay) : %s" %
str(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)))
LOG.info("-" * 50)
def terminate(self):
gabriel.network.CommonServer.terminate(self)
def main():
ucomm_relay_server = UCommRelayServer(gabriel.Const.UCOMM_COMMUNICATE_PORT, UCommRelayHandler)
ucomm_relay_thread = threading.Thread(target = ucomm_relay_server.serve_forever)
ucomm_relay_thread.daemon = True
try:
ucomm_relay_thread.start()
while True:
time.sleep(100)
except KeyboardInterrupt as e:
sys.stdout.write("Exit by user\n")
ucomm_relay_server.terminate()
sys.exit(1)
except Exception as e:
sys.stderr.write(str(e))
ucomm_relay_server.terminate()
sys.exit(1)
else:
ucomm_relay_server.terminate()
sys.exit(0)
if __name__ == '__main__':
main()
|
named_threads.py
|
import threading
import time
def worker():
    print(threading.current_thread().name, 'Starting')
    time.sleep(2)
    print(threading.current_thread().name, 'Exiting')
def my_service():
    print(threading.current_thread().name, 'Starting')
    time.sleep(3)
    print(threading.current_thread().name, 'Exiting')
t = threading.Thread(name='my_service', target=my_service)
w = threading.Thread(name='worker', target=worker)
w2 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
t.start()
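# Expected console output (thread interleaving may vary): "worker Starting",
# an auto-generated name such as "Thread-1 Starting" for w2 (threads created
# without an explicit name get a "Thread-N" name), "my_service Starting",
# followed by the matching "Exiting" lines once the sleeps complete.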
|
utils.py
|
import threading
from pathlib import Path
import playsound
import rpipes
from rpipes.border import draw_boundary
def get_int_input(prompt: str, x: int, y: int) -> int:
"""Ask for integer input with `prompt` positioned at `x`, `y`."""
print(rpipes.terminal.clear, end="")
draw_boundary()
previous_input = ""
while True:
print(rpipes.terminal.move_xy(x, y) + " " * len(prompt + previous_input), end="")
previous_input = input(rpipes.terminal.move_xy(x, y) + prompt)
try:
return int(previous_input)
except ValueError:
print(rpipes.terminal.move_xy(x, y + 1) + "Invalid input!")
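# Note: get_int_input() overwrites the previously echoed prompt and answer
# with spaces before re-prompting, so repeated invalid attempts do not pile
# up on the screen.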
def play_sound(file_path: Path) -> None:
"""Run sound file behind `file_path` asynchronously."""
threading.Thread(target=playsound.playsound, args=(file_path,), daemon=True).start()
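# Note: play_sound() runs playback on a daemon thread, so it never blocks the
# caller and any sound still playing is simply cut off when the program exits.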
|
detection_thread.py
|
"""Thread for processing object detection."""
import logging
import threading
import time
from datetime import datetime
from multiprocessing.context import Process
import cv2
import imutils
from PIL import Image
from edgetpu_server.image_writer_thread import ImageWriterThread
from edgetpu_server.models.detection_entity import DetectionEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_WIDTH = 500
FRAME_FAILURE_SLEEP = 0.5
CV_CAP_PROP_FRAME_COUNT = 7
CV_CAP_PROP_POS_FRAMES = 1
# pylint: disable=too-few-public-methods
class DetectionThread:
"""Image detection thread."""
def __init__(self, entity_stream, engine, hass, video_stream_lock):
# self._set_image_data = set_image_data
self.entity_stream = entity_stream
self.engine = engine
self.hass = hass
self.video_url = entity_stream.stream_url
self.video_stream = entity_stream.video_stream
self.video_stream_lock = video_stream_lock
def _retrieve_frame(self):
ret = None
frame = None
start = datetime.now().timestamp()
self.video_stream_lock.acquire()
try:
ret, frame = self.video_stream.retrieve()
except Exception as err:
_LOGGER.error("Error retrieving video frame: %s",
str(err))
finally:
self.video_stream_lock.release()
        if not ret or frame is None:  # truth-testing a numpy array frame would raise ValueError
return None
frame = cv2.cvtColor( # pylint: disable=no-member
imutils.resize(
frame,
width=DEFAULT_WIDTH
),
cv2.COLOR_BGR2RGB # pylint: disable=no-member
) # pylint: disable=no-member
_LOGGER.debug(
"Retrieving frame took %f ms time for %s (%s)",
(datetime.now().timestamp()) - start,
self.entity_stream.entity_id,
self.entity_stream.stream_url
)
return Image.fromarray(frame) #, Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
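    # Note: _retrieve_frame() calls retrieve() rather than read(), which
    # suggests another thread is expected to grab() frames from the shared
    # VideoCapture; video_stream_lock serializes access to that shared stream.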
def _process_frame(self, frame):
        start = datetime.now().timestamp()
        detection_entity = None  # avoid a NameError on the return below if detection fails
try:
detection_entity = DetectionEntity(
self.entity_stream.name,
self.entity_stream.entity_id,
self.engine.filtered_detect_with_image(frame)
)
except Exception as err:
_LOGGER.error(
"Error processing frame: %s",
str(err)
)
_LOGGER.debug(
"Processing frame took %f ms time for %s (%s)",
datetime.now().timestamp() - start,
self.entity_stream.entity_id,
self.entity_stream.stream_url
)
return detection_entity
def _set_state(self, detection_entity):
start = datetime.now().timestamp()
try:
self.hass.set_entity_state(detection_entity)
except Exception as err:
_LOGGER.error(
"Error setting entity state %s: %s",
detection_entity.entity_id,
str(err)
)
_LOGGER.debug(
"Setting entity state took %f ms time for %s (%s)",
datetime.now().timestamp() - start,
self.entity_stream.entity_id,
self.entity_stream.stream_url
)
def run(self):
"""Loop through video stream frames and detect objects."""
        _LOGGER.warning('Running detection thread')
while self.video_stream.isOpened():
start = datetime.now().timestamp()
frame = self._retrieve_frame()
# if original is None:
# _LOGGER.warning(
# "Unable to get original frame for %s",
# self.video_url
# )
if frame is None:
_LOGGER.warning(
"Unable to retrieve frame %s, sleeping for %f s",
self.video_url,
FRAME_FAILURE_SLEEP
)
time.sleep(FRAME_FAILURE_SLEEP)
continue
detection_entity = self._process_frame(frame)
self._set_state(detection_entity)
# self._annotate_image(original, detection_entity)
_LOGGER.debug(
"Detection loop took %f ms time for %s (%s)",
datetime.now().timestamp() - start,
self.entity_stream.entity_id,
self.entity_stream.stream_url
)
        _LOGGER.warning('Video stream closed')
# def _annotate_image(self, frame, detection_entity):
# image_writer = ImageWriterThread(
# self._set_image_data,
# frame,
# detection_entity
# )
#
# image_writer = Process(target=image_writer.run, daemon=True)
# image_writer.start()
|
server.py
|
########### Please use python3 to run the codes. ##########
########### Usage: python3 server.py port_number try_times ###########
############ This program simulates the server behaviour with different functions ########
from socket import *
import sys
import time
import datetime
import threading
#check the input arguments
if len(sys.argv) != 3:
raise ValueError('Invalid arguments. The terminal command should meet the format: python3 server.py serverport number_of_consecutive_failed_attempts.')
serverPort = int(sys.argv[1])
TRY_count = sys.argv[2]
#define message types
POST_MESSAGE = 'MSG'
DELETE_MESSAGE = 'DLT'
EDIT_MESSAGE = 'EDT'
READ_MESSAGE = 'RDM'
ACTIVATE_USERS = 'ATU'
UPLOAD_FILE = 'UPD'
DISCONNECT_MESSAGE = 'OUT'
#check the attempt numbers
if float(TRY_count) != int(float(TRY_count)) or not 1 <= int(TRY_count) <= 5:
    raise ValueError('The number of login attempts should be an integer between 1 and 5')
TRY_count = int(TRY_count)
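# On-disk formats used by the handlers below (one record per line):
#   credentials.txt : "<username> <password>"
#   block.txt       : "<username> <epoch seconds of the failed attempt>"
#   userlog.txt     : "<seq>; <dd Mon yyyy hh:mm:ss>; <username>; <ip>; <udp_port>"
#   messagelog.txt  : "<seq>; <dd Mon yyyy hh:mm:ss>; <username>; <message>; <edited: yes/no>"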
#user login
def login(connectionSocket,address):
user_list = []
passwd_list = []
block_list = []
try_count = 1
#Get the users and passwds
with open('credentials.txt') as f:
line = f.readline()
while line:
user,passwd = line.strip().split()
user_list.append(user)
passwd_list.append(passwd)
line = f.readline()
#Produce block list
try:
with open("block.txt", "r+") as f:
d = f.readlines()
t = time.time()
f.seek(0)
            #if less than 10 seconds have passed since the last failed login, keep the user in the block list
for i in d:
b_t = float(i.strip().split()[1])
if (t - b_t) <= 10:
f.write(i)
f.truncate()
with open("block.txt") as f:
d = f.readlines()
for i in d:
block_list.append(i.strip().split()[0])
except:
pass
    #implement login
while True:
#first try. with user name and passwd
if try_count == 1:
connectionSocket.send('Please enter your username:'.encode())
user = connectionSocket.recv(1024).decode()
idx = user_list.index(user)
connectionSocket.send('Please enter your password:'.encode())
passwd = connectionSocket.recv(1024).decode()
# only input passwd
else:
passwd = connectionSocket.recv(1024).decode()
#block the user if is in block list
if user in block_list:
            connectionSocket.send('Failed. Your account is still blocked, please try again later.'.encode())
break
#check passwd
if passwd == passwd_list[idx]:
#successfully login in. added to active user log
print(f'{user} logged in.')
connectionSocket.send(f'Welcome {user}!\nEnter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):'.encode())
UDP_port = connectionSocket.recv(1024).decode()
t = time.ctime().split()
temp_t = t[2] + ' ' + t[1] + ' ' + t[-1] + ' ' + t[-2]
try:
with open('userlog.txt') as f:
d = f.readlines()
idx = len(d) + 1
with open('userlog.txt','a') as f:
temp = str(idx) + '; ' + temp_t + '; ' + user + '; ' + address + '; ' + UDP_port + '\n'
f.write(temp)
except:
idx = 1
with open("userlog.txt", "a") as f:
temp = '1' + '; ' + temp_t + '; ' + user + '; ' + address + '; ' + UDP_port + '\n'
f.write(temp)
return user
else:
#passwd is not correct
if try_count < TRY_count:
connectionSocket.send(f'Password is not correct. Left trying time: {TRY_count - try_count}. Please reenter your password:'.encode())
try_count += 1
continue
            #attempt limit exceeded, add the user to the block list
else:
t = str(time.time())
temp = user + ' ' + t + '\n'
with open('block.txt','a') as f:
f.write(temp)
                connectionSocket.send('Failed. You have tried too many times and the account has been blocked, please come back later.\n'.encode())
break
return False
#user log out
def logout(conn,user):
#find the log out user seq number
with open('userlog.txt','r') as f:
d = f.readlines()
for i in d:
name = i.strip().split('; ')[2]
if user == name:
idx = int(i.strip().split('; ')[0])
break
else:
continue
#delete the user from the active table
with open('userlog.txt','r+') as f:
d = f.readlines()
f.seek(0)
#remove the user in the active user log
for i in d:
temp_list = i.strip().split('; ')
if temp_list[2] != user:
if int(temp_list[0]) > idx:
new_idx = str(int(temp_list[0]) - 1)
temp_list[0] = new_idx
i = '; '.join(temp_list) + '\n'
else:
pass
f.write(i)
f.truncate()
print(f'{user} logged out.')
    conn.send(f'Successfully disconnected from the server. See ya {user}!'.encode())
#user send the message
def post_message(user,message):
t = time.ctime().split()
temp_t = t[2] + ' ' + t[1] + ' ' + t[-1] + ' ' + t[-2]
try:
with open('messagelog.txt') as f:
d = f.readlines()
idx = len(d) + 1
with open('messagelog.txt','a') as f:
temp = str(idx) + '; ' + temp_t + '; ' + user + '; ' + message + '; no'+ '\n'
f.write(temp)
except:
idx = 1
with open("messagelog.txt", "a") as f:
temp = '1' + '; ' + temp_t + '; ' + user + '; ' + message + '; no'+ '\n'
f.write(temp)
print(f'{user} posted MSG # {idx} "{message}" at {temp_t}')
return f'Message #{idx} posted at {temp_t}'
#user delete the message
def delete_message(user,check_list):
user_msg_list = []
check = ' '.join(check_list) + ' ' + user
try:
#extract the message file
with open("messagelog.txt", "r") as f:
d = f.readlines()
for i in d:
lst = i.strip().split('; ')
temp_user = lst[2]
temp_mg = lst[1]
temp_num = lst[0]
user_msg_list.append( '#' + temp_num + ' ' + temp_mg + ' ' + temp_user)
        #if the 3 conditions are not matched, send back an error message
if check not in user_msg_list:
cur_t = time.ctime()
print(f'{user} failed to delete MSG at {cur_t}')
return 'Delete the message failed. Please check the input.'
#if match, find the corresponding seq number then delete, and move up
else:
idx = user_msg_list.index(check) + 1
with open('messagelog.txt','r+') as f:
d = f.readlines()
f.seek(0)
for i in d:
temp_lst = i.strip().split('; ')
if int(temp_lst[0]) != idx:
if int(temp_lst[0]) > idx:
new_idx = str(int(temp_lst[0]) - 1)
temp_lst[0] = new_idx
i = '; '.join(temp_lst) + '\n'
else:
pass
f.write(i)
else:
msg = temp_lst[-2]
continue
f.truncate()
cur_t = time.ctime()
print(f'{user} deleted MSG # {idx} "{msg}" at {cur_t}')
return f'Delete the message successfully. Message #{idx} deleted at {cur_t}.'
#if the messages file is not created.
except:
cur_t = time.ctime()
print(f'{user} failed to delete MSG at {cur_t}')
return 'Delete the message failed. There is no messages'
#user edit the message
def edit_message(user,check_list):
user_msg_list = []
check = ' '.join(check_list[:5]) + ' ' + user
new_msg = ' '.join(check_list[5:])
try:
#extract the message file
with open("messagelog.txt", "r") as f:
d = f.readlines()
for i in d:
lst = i.strip().split('; ')
temp_user = lst[2]
temp_mg = lst[1]
temp_num = lst[0]
user_msg_list.append( '#' + temp_num + ' ' + temp_mg + ' ' + temp_user)
        #if the 3 conditions are not matched, send back an error message
if check not in user_msg_list:
cur_t = time.ctime()
print(f'{user} failed to edit MSG at {cur_t}')
return 'Edited the message failed. Please check the input.'
#start to edit message
else:
idx = user_msg_list.index(check) + 1
with open('messagelog.txt','r+') as f:
d = f.readlines()
f.seek(0)
for i in d:
temp_lst = i.strip().split('; ')
#replace the message and update information
if int(temp_lst[0]) == idx:
t = time.ctime().split()
cur_t = t[2] + ' ' + t[1] + ' ' + t[-1] + ' ' + t[-2]
temp_lst[1] = cur_t
temp_lst[-1] = 'yes'
temp_lst[-2] = new_msg
i = '; '.join(temp_lst) + '\n'
else:
pass
f.write(i)
f.truncate()
cur_t = time.ctime()
print(f'{user} edited MSG # {idx} "{new_msg}" at {cur_t}')
return f'Edit the message successfully. Message #{idx} edited at {cur_t}.'
#if no message file created
except:
cur_t = time.ctime()
print(f'{user} failed to Edit MSG at {cur_t}')
return 'Edit the message failed. There is no messages'
#user read new message
def read_message(user,check_list):
date_time_str = ' '.join(check_list)
    #check the input datetime format is correct, convert to a datetime object
try:
comp_date = datetime.datetime.strptime(date_time_str, '%d %b %Y %H:%M:%S')
except:
print(f'{user} issued RDM command failed.')
        return 'Read message failed. Invalid datetime format. Please follow (dd Mon yyyy hh:mm:ss).'
#Reading message, compare to the request time, if bigger, add to the msg
try:
msg = ''
with open("messagelog.txt", "r") as f:
d = f.readlines()
for i in d:
temp = i.strip().split('; ')
temp_date = temp[1]
date_time_obj = datetime.datetime.strptime(temp_date, '%d %b %Y %H:%M:%S')
if date_time_obj >= comp_date:
if temp[-1] == 'no':
msg += '#' + temp[0] + '; ' + temp[-3] + ': ' + f'"{temp[-2]}" ' + 'posted at ' + temp[1] + '.\n'
else:
msg += '#' + temp[0] + '; ' + temp[-3] + ': ' + f'"{temp[-2]}" ' + 'edited at ' + temp[1] + '.\n'
else:
continue
#if no new message
if msg == '':
print(f'{user} issued RDM command failed.')
return 'Read message failed. There is no new message'
#send required messages
else:
print(f'{user} issued RDM command.\nReturn message:\n{msg}')
return f'Read message successfully. The new message is:\n{msg}'
#if message file not created
except:
print(f'{user} issued RDM command failed.')
return 'Read the message failed. There is no messages'
#user requests the list of currently active users
def download_active_users(user):
active_count = 0
#read the active user file
with open('userlog.txt') as f:
d = f.readlines()
temp_str = 'Current active users is:\n'
for i in d:
temp_list = i.strip().split('; ')
name = temp_list[2]
if user == name:
continue
t = temp_list[1]
ip = temp_list[-2]
port = temp_list[-1]
temp_str += name + ', '+ ip + ', ' + port + ', active since ' + t +'\n'
active_count += 1
    #no other user is currently active
    if active_count == 0:
        print(f'{user} issued ATU command.')
        return 'Currently no other users are active.'
#return user list
else:
        print(f'{user} issued ATU command.')
print(f'Return active user list: \n{temp_str}')
return temp_str
#user requests to send a file to another active user (UPD)
def upload_file(user,temp_list):
    #if the input is not correct, return an error message
if len(temp_list) != 2:
return 'Unsuccessfully require sending file. Input format not correct. Please retry.'
check_user = temp_list[0]
file_name = temp_list[1]
users = []
ips = []
ports = []
#read the current active users
with open('userlog.txt') as f:
d = f.readlines()
for i in d:
lst = i.strip().split('; ')
users.append(lst[-3])
ips.append(lst[-2])
ports.append(lst[-1])
#if target user is not active
if check_user not in users:
return 'Unsuccessfully require sending file. Target user is not active.'
    #Server sends back the details of the target user to the sender
else:
msg = 'Transfer '
idx = users.index(check_user)
des_ip = ips[idx]
des_port = ports[idx]
file_name = user + '_' + file_name
msg += des_ip + ' ' + des_port + ' ' + file_name + ' ' + check_user
return msg
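#Note: upload_file() does not transfer any data itself; it replies with
#"Transfer <ip> <port> <filename> <target_user>" so that the requesting client
#can send the file directly to the target user (presumably over the UDP port
#recorded in userlog.txt).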
#main function to handle user message
def handle_client(conn, addr):
user = login(conn,addr[0])
if user:
print(f'[NEW CONNECTION] {addr} connected.')
connected = True
else:
connected = False
while connected:
        msg = conn.recv(1024).decode()
        if not msg:
            #client closed the connection unexpectedly
            break
        temp_str = msg.strip().split()
        command = temp_str[0]
        check_str = temp_str[1:]
#Dealing with all kinds of different functions
if command == ACTIVATE_USERS or command == DISCONNECT_MESSAGE:
if len(temp_str) != 1:
conn.send(f'Invalid format of {command}. There should be no arguments for this command. Please retry:'.encode())
continue
else:
if command == DISCONNECT_MESSAGE:
connected = False
logout(conn,user)
break
else:
res = download_active_users(user)
elif command == POST_MESSAGE:
temp_meg = ' '.join(check_str)
res = post_message(user,temp_meg)
elif command == DELETE_MESSAGE:
res = delete_message(user,check_str)
elif command == EDIT_MESSAGE:
res = edit_message(user,check_str)
elif command == UPLOAD_FILE:
res = upload_file(user,check_str)
elif command == READ_MESSAGE:
res = read_message(user,check_str)
else:
conn.send('Invalid command. Please use available command (MSG, DLT, EDT, RDM, ATU, OUT, UPD):'.encode())
continue
res += '\nEnter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):'
conn.send(res.encode())
conn.close()
#get server name
SERVER = gethostbyname(gethostname())
#SERVER = 'localhost'
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind((SERVER,serverPort))
#server multithreading process
def start():
serverSocket.listen()
    print(f'Server is listening, the IP address is {SERVER}.')
while 1:
conn, addr = serverSocket.accept()
thread = threading.Thread(target=handle_client,args=[conn,addr])
thread.start()
print(f'[ACTIVE CONNECTIONS] {threading.active_count() - 1}')
start()
|
test_gil_scoped.py
|
# -*- coding: utf-8 -*-
import multiprocessing
import threading
from pybind11_tests import gil_scoped as m
def _run_in_process(target, *args, **kwargs):
"""Runs target in process and returns its exitcode after 10s (None if still alive)."""
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
process.start()
# Do not need to wait much, 10s should be more than enough.
process.join(timeout=10)
return process.exitcode
finally:
if process.is_alive():
process.terminate()
def _python_to_cpp_to_python():
"""Calls different C++ functions that come back to Python."""
class ExtendedVirtClass(m.VirtClass):
def virtual_func(self):
pass
def pure_virtual_func(self):
pass
extended = ExtendedVirtClass()
m.test_callback_py_obj(lambda: None)
m.test_callback_std_func(lambda: None)
m.test_callback_virtual_func(extended)
m.test_callback_pure_virtual_func(extended)
def _python_to_cpp_to_python_from_threads(num_threads, parallel=False):
"""Calls different C++ functions that come back to Python, from Python threads."""
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=_python_to_cpp_to_python)
thread.daemon = True
thread.start()
if parallel:
threads.append(thread)
else:
thread.join()
for thread in threads:
thread.join()
def test_python_to_cpp_to_python_from_thread():
"""Makes sure there is no GIL deadlock when running in a thread.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0
def test_python_to_cpp_to_python_from_thread_multiple_parallel():
"""Makes sure there is no GIL deadlock when running in a thread multiple times in parallel.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0
def test_python_to_cpp_to_python_from_thread_multiple_sequential():
"""Makes sure there is no GIL deadlock when running in a thread multiple times sequentially.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0
def test_python_to_cpp_to_python_from_process():
"""Makes sure there is no GIL deadlock when using processes.
This test is for completion, but it was never an issue.
"""
assert _run_in_process(_python_to_cpp_to_python) == 0
def test_cross_module_gil():
"""Makes sure that the GIL can be acquired by another module from a GIL-released state."""
m.test_cross_module_gil() # Should not raise a SIGSEGV
|
inference_throughput.py
|
#!/usr/bin/env python3
'''aionfpga ~ inference throughput
Copyright (C) 2020 Dominik Müller and Nico Canzani
'''
import sys
import math
import base64
import ctypes
from threading import Thread
from datetime import datetime
import cv2
import numpy as np
from dnndk import n2cube
from pynq_dpu import DpuOverlay
import fhnwtoys.inference as fh
def sine_window(N, num_frames):
window = np.zeros((fh.frames_to_consider,), dtype=np.float32)
for n in range(num_frames):
window[n] = math.sin((n + 1) / (N + 1) * math.pi)
return window
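# The half-sine window above weights the per-frame predictions so that frames
# from the middle of the throw contribute the most and frames near the start
# and end contribute the least; the weighted average itself is computed further
# below via np.matmul(window, predictions) / np.sum(window).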
def get_dict():
return {'start': [], 'end': []}
def main():
# Set up the DPU IP
overlay = DpuOverlay(str(fh.dir_dpu / fh.dpu_bit_file))
overlay.load_model(str(fh.dir_dpu / fh.dpu_assembly_file))
# Set up the Neural Network Runtime (N2Cube)
kernel_name = fh.kernel_name
kernel_conv_input = fh.kernel_conv_input
kernel_fc_output = fh.kernel_fc_output
n2cube.dpuOpen()
kernel = n2cube.dpuLoadKernel(kernel_name)
task = n2cube.dpuCreateTask(kernel, 0)
input_tensor_size = n2cube.dpuGetInputTensorSize(task, kernel_conv_input)
output_tensor_size = n2cube.dpuGetOutputTensorSize(task, kernel_fc_output)
output_tensor_channel = n2cube.dpuGetOutputTensorChannel(task, kernel_fc_output)
output_tensor_address = n2cube.dpuGetOutputTensorAddress(task, kernel_fc_output)
output_tensor_scale = n2cube.dpuGetOutputTensorScale(task, kernel_fc_output)
# libcamera
libcamera = ctypes.CDLL(fh.dir_cam / fh.libcamera_file)
libcamera.get_frame_ptr.restype = ctypes.POINTER(ctypes.c_ubyte)
libcamera.get_throw_bgn_idx.restype = ctypes.c_uint
libcamera.get_throw_end_idx.restype = ctypes.c_uint
libcamera.get_throw_bgn.restype = ctypes.c_bool
libcamera.get_throw_end.restype = ctypes.c_bool
libcamera.set_frame_rate.restype = None
libcamera.set_buff_size.restype = None
libcamera.set_exposure_time.restype = None
libcamera.set_camera_gain.restype = None
libcamera.set_avg_diffs.restype = None
libcamera.set_threshold_mult.restype = None
libcamera.set_frames_to_acquire.restype = None
libcamera.initialize.restype = ctypes.c_int
libcamera.start_acquisition.restype = ctypes.c_int
libcamera.terminate.restype = ctypes.c_int
# Set up of variables
frames = np.empty((fh.frames_to_consider,) + fh.bgr_shape, dtype=np.uint8)
# Initialize Camera
initialize = libcamera.initialize()
if initialize != fh.ReturnCodes.SUCCESS:
try:
return_code = fh.ReturnCodes(initialize).name
except ValueError:
return_code = initialize
print(f'Initialization failed: {return_code}')
sys.exit()
else:
print('================================= READY =================================')
# Reset predictions
predictions = np.zeros((fh.frames_to_consider, fh.num_objects), dtype=np.float32)
# Start acquisition (Threaded)
t = Thread(target=libcamera.start_acquisition)
t.start()
# Wait until the throw has ended
while not libcamera.get_throw_end():
pass
stages = ['Get raw bayer', 'Transform color', 'Resize', 'Normalize', 'Run inference', 'Softmax', 'Weighting']
meas_time = {s: get_dict() for s in stages}
throw_bgn_idx = libcamera.get_throw_bgn_idx()
throw_end_idx = libcamera.get_throw_end_idx()
num_frames = throw_end_idx - throw_bgn_idx - 1 # Ignore the last two captured frames
for idx, frame_id in enumerate(range(throw_bgn_idx, throw_end_idx - 1)):
meas_time['Get raw bayer']['start'].append(datetime.now())
frame_ptr = libcamera.get_frame_ptr(frame_id)
raw_frame = np.ctypeslib.as_array(frame_ptr, shape=fh.raw_shape)
meas_time['Get raw bayer']['end'].append(datetime.now())
# Transform Baumer BayerRG8 to BGR8 (Baumer BayerRG ≙ OpenCV BayerBG)
meas_time['Transform color']['start'].append(datetime.now())
frames[idx] = cv2.cvtColor(raw_frame, cv2.COLOR_BayerBG2BGR)
meas_time['Transform color']['end'].append(datetime.now())
meas_time['Resize']['start'].append(datetime.now())
frame_resized = cv2.resize(frames[idx], fh.inf_dsize, interpolation=fh.Interpolation.NEAREST)
meas_time['Resize']['end'].append(datetime.now())
meas_time['Normalize']['start'].append(datetime.now())
frame_inference = frame_resized.astype(np.float32) / 255.0
meas_time['Normalize']['end'].append(datetime.now())
meas_time['Run inference']['start'].append(datetime.now())
n2cube.dpuSetInputTensorInHWCFP32(task, kernel_conv_input, frame_inference, input_tensor_size)
n2cube.dpuRunTask(task)
meas_time['Run inference']['end'].append(datetime.now())
# n2cube.dpuRunSoftmax(.) sometimes returns all zeros except one NaN
# This section replaces the first occurrence of NaN in the prediction array with 1.0 and sets everything else to 0.0
meas_time['Softmax']['start'].append(datetime.now())
prediction = n2cube.dpuRunSoftmax(output_tensor_address, output_tensor_channel, output_tensor_size//output_tensor_channel, output_tensor_scale)
nan = np.isnan(prediction)
if nan.any():
nan_idx = nan.argmax() # return the index of the first occurrence of NaN
prediction = np.zeros((fh.num_objects,), dtype=np.float32)
prediction[nan_idx] = 1.0
predictions[idx] = prediction
meas_time['Softmax']['end'].append(datetime.now())
if idx == fh.frames_to_consider - 1:
break
meas_time['Weighting']['start'].append(datetime.now())
num_frames_considered = min(fh.frames_to_consider, num_frames)
window = sine_window(num_frames, num_frames_considered) # weighting
weighted_prediction = np.matmul(window, predictions) / np.sum(window)
meas_time['Weighting']['end'].append(datetime.now())
for k in meas_time:
meas_time[k] = [(e - s).total_seconds() * 1000 for s, e in zip(meas_time[k]['start'], meas_time[k]['end'])]
meas_time[k] = sum(meas_time[k]) / len(meas_time[k])
# create output file
mmax = 0
for s in stages:
if len(s) > mmax:
mmax = len(s)
output = f'Number of captured frames: {num_frames_considered}\n\n'
for idx, s in enumerate(stages):
output += f'{s}:{" "*(mmax - len(stages[idx]))} {meas_time[s]:.3f} ms\n'
output += f'\nSum:{" "*(mmax - len("Sum"))} {sum(meas_time.values()):.3f} ms\n'
output += f'Frame rate:{" "*(mmax - len("Frame rate"))} {1000 / sum(meas_time.values()):.3f} fps\n'
print(output)
with open(fh.dir_verification / 'throughput.log', 'w') as f:
f.write(output)
# Wait until the camera thread (process due to ctypes) is terminated
t.join()
# Terminate Camera
terminate = libcamera.terminate()
# Clean up the DPU IP
n2cube.dpuDestroyKernel(kernel)
n2cube.dpuDestroyTask(task)
if __name__ == '__main__':
main()
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import sys
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger, get_git_version
class BaseCrashReporter(Logger):
report_server = "https://crashhub.kotocoin.info"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".electrum.org" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data, raise_for_status=True) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = "".join(traceback.format_list(stack))
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": get_git_version() or ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
return args
def _get_traceback_str(self) -> str:
return "".join(traceback.format_exception(*self.exc_args))
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = self._get_traceback_str()
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self) -> str:
raise NotImplementedError
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
# don't spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
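# Note: the TestingException is deliberately raised from a separate thread
# rather than from the caller's stack, presumably so it surfaces through the
# application's installed exception/reporting hooks like a real unexpected
# crash would, instead of propagating to whoever called trigger_crash().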
|
__init__.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2021 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
QR Code and Micro QR Code implementation.
"QR Code" and "Micro QR Code" are registered trademarks of DENSO WAVE INCORPORATED.
"""
from __future__ import absolute_import, unicode_literals
import sys
import io
from . import encoder
from .encoder import DataOverflowError
from . import writers, utils
try: # pragma: no cover
str_type = basestring # noqa: F821
except NameError: # pragma: no cover
str_type = str
__version__ = '1.3.4.dev'
__all__ = ('make', 'make_qr', 'make_micro', 'make_sequence', 'QRCode',
'QRCodeSequence', 'DataOverflowError')
# <https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef#New_Style_Classes>
__metaclass__ = type
def make(content, error=None, version=None, mode=None, mask=None, encoding=None,
eci=False, micro=None, boost_error=True):
"""\
Creates a (Micro) QR Code.
    This is the main entry point to create QR Codes and Micro QR Codes.
Aside from `content`, all parameters are optional and an optimal (minimal)
(Micro) QR code with a maximal error correction level is generated.
:param content: The data to encode. Either a Unicode string, an integer or
bytes. If bytes are provided, the `encoding` parameter should be
used to specify the used encoding.
:type content: str, int, bytes
:param error: Error correction level. If ``None`` (default), error
correction level ``L`` is used (note: Micro QR Code version M1 does
not support any error correction. If an explicit error correction
        level is used, an M1 QR code won't be generated).
Valid values: ``None`` (allowing generation of M1 codes or use error
correction level "L" or better see :paramref:`boost_error <segno.make.boost_error>`),
"L", "M", "Q", "H" (error correction level "H" isn't available for
Micro QR Codes).
===================================== ===========================
Error correction level Error correction capability
===================================== ===========================
L (Segno's default unless version M1) recovers 7% of data
M recovers 15% of data
Q recovers 25% of data
H (not available for Micro QR Codes) recovers 30% of data
===================================== ===========================
Higher error levels may require larger QR codes (see also
:paramref:`version <segno.make.version>` parameter).
The `error` parameter is case insensitive.
See also the :paramref:`boost_error <segno.make.boost_error>` parameter.
:type error: str or None
:param version: QR Code version. If the value is ``None`` (default), the
minimal version which fits for the input data will be used.
Valid values: "M1", "M2", "M3", "M4" (for Micro QR codes) or an
integer between 1 and 40 (for QR codes).
The `version` parameter is case insensitive.
:type version: int, str or None
:param mode: "numeric", "alphanumeric", "byte", "kanji" or "hanzi".
If the value is ``None`` (default) the appropriate mode will
automatically be determined.
If `version` refers to a Micro QR code, this function may raise a
:py:exc:`ValueError` if the provided `mode` is not supported.
The `mode` parameter is case insensitive.
============ =======================
Mode (Micro) QR Code Version
============ =======================
numeric 1 - 40, M1, M2, M3, M4
alphanumeric 1 - 40, M2, M3, M4
byte 1 - 40, M3, M4
kanji 1 - 40, M3, M4
hanzi 1 - 40
============ =======================
.. note::
The Hanzi mode may not be supported by all QR code readers since
it is not part of ISO/IEC 18004:2015(E).
For this reason, this mode must be specified explicitly by the
user::
import segno
qr = segno.make('书读百遍其义自现', mode='hanzi')
:type mode: str or None
:param mask: Data mask. If the value is ``None`` (default), the
appropriate data mask is chosen automatically. If the `mask`
parameter is provided, this function may raise a :py:exc:`ValueError`
if the mask is invalid.
:type mask: int or None
:param encoding: Indicates the encoding in mode "byte". By default
(`encoding` is ``None``) the implementation tries to use the
standard conform ISO/IEC 8859-1 encoding and if it does not fit, it
will use UTF-8. Note that no ECI mode indicator is inserted by
default (see :paramref:`eci <segno.make.eci>`).
The `encoding` parameter is case insensitive.
:type encoding: str or None
:param bool eci: Indicates if binary data which does not use the default
encoding (ISO/IEC 8859-1) should enforce the ECI mode. Since a lot
of QR code readers do not support the ECI mode, this feature is
disabled by default and the data is encoded in the provided
`encoding` using the usual "byte" mode. Set `eci` to ``True`` if
an ECI header should be inserted into the QR Code. Note that
the implementation may not know the ECI designator for the provided
`encoding` and may raise an exception if the ECI designator cannot
be found.
The ECI mode is not supported by Micro QR Codes.
:param micro: If :paramref:`version <segno.make.version>` is ``None`` (default)
this parameter can be used to allow the creation of a Micro QR code.
If set to ``False``, a QR code is generated. If set to
``None`` (default) a Micro QR code may be generated if applicable.
If set to ``True`` the algorithm generates a Micro QR Code or
raises an exception if the `mode` is not compatible or the `content`
is too large for Micro QR codes.
:type micro: bool or None
:param bool boost_error: Indicates if the error correction level may be
increased if it does not affect the version (default: ``True``).
If set to ``True``, the :paramref:`error <segno.make.error>`
parameter is interpreted as minimum error level. If set to ``False``,
the resulting (Micro) QR code uses the provided `error` level
(or the default error correction level, if error is ``None``)
:raises: :py:exc:`ValueError` or :py:exc:`DataOverflowError`: In case the
data does not fit into a (Micro) QR Code or it does not fit into
the provided :paramref:`version`.
:rtype: QRCode
"""
return QRCode(encoder.encode(content, error, version, mode, mask, encoding,
eci, micro, boost_error=boost_error))
def make_qr(content, error=None, version=None, mode=None, mask=None,
encoding=None, eci=False, boost_error=True):
"""\
Creates a QR code (never a Micro QR code).
See :py:func:`make` for a description of the parameters.
:rtype: QRCode
"""
return make(content, error=error, version=version, mode=mode, mask=mask,
encoding=encoding, eci=eci, micro=False, boost_error=boost_error)
def make_micro(content, error=None, version=None, mode=None, mask=None,
encoding=None, boost_error=True):
"""\
Creates a Micro QR code.
See :py:func:`make` for a description of the parameters.
Note: Error correction level "H" isn't available for Micro QR codes. If
used, this function raises a :py:class:`segno.ErrorLevelError`.
:rtype: QRCode
"""
return make(content, error=error, version=version, mode=mode, mask=mask,
encoding=encoding, micro=True, boost_error=boost_error)
def make_sequence(content, error=None, version=None, mode=None, mask=None,
encoding=None, boost_error=True, symbol_count=None):
"""\
Creates a sequence of QR codes using the Structured Append mode.
If the content fits into one QR code and neither ``version`` nor
``symbol_count`` is provided, this function may return a sequence with
one QR Code which does not use the Structured Append mode. Otherwise a
sequence of 2 .. n (max. n = 16) QR codes is returned which use the
Structured Append mode.
The Structured Append mode allows to split the content over a number
(max. 16) QR Codes.
    The Structured Append mode isn't available for Micro QR Codes, therefore
    the returned sequence contains QR codes only.
Since this function returns an iterable object, it may be used as follows:
.. code-block:: python
for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)):
qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue')
The number of QR codes is determined by the `version` or `symbol_count`
parameter.
See :py:func:`make` for a description of the other parameters.
:param int symbol_count: Number of symbols.
:rtype: QRCodeSequence
"""
return QRCodeSequence(map(QRCode,
encoder.encode_sequence(content, error=error,
version=version,
mode=mode, mask=mask,
encoding=encoding,
boost_error=boost_error,
symbol_count=symbol_count)))
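# A minimal usage sketch (assuming the writers referenced below are available):
#
#     import segno
#     qrcode = segno.make('Hello world')    # smallest fitting (Micro) QR code
#     qrcode.save('hello.png', scale=5)     # serializer chosen by file extension
#     qrcode.save('hello.svg', dark='darkblue', light=None)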
class QRCode:
"""\
Represents a (Micro) QR Code.
"""
__slots__ = ('matrix', 'mask', '_version', '_error', '_mode')
def __init__(self, code):
"""\
Initializes the QR Code object.
:param code: An object with a ``matrix``, ``version``, ``error``,
``mask`` and ``segments`` attribute.
"""
self.matrix = code.matrix
"""Returns the matrix.
:rtype: tuple of :py:class:`bytearray` instances.
"""
self.mask = code.mask
"""Returns the data mask pattern reference
:rtype: int
"""
self._version = code.version
self._error = code.error
self._mode = code.segments[0].mode if len(code.segments) == 1 else None
@property
def version(self):
"""\
(Micro) QR Code version. Either a string ("M1", "M2", "M3", "M4") or
an integer in the range of 1 .. 40.
:rtype: str or int
"""
return encoder.get_version_name(self._version)
@property
def error(self):
"""\
Error correction level; either a string ("L", "M", "Q", "H") or ``None``
if the QR code provides no error correction (Micro QR Code version M1)
:rtype: str
"""
if self._error is None:
return None
return encoder.get_error_name(self._error)
@property
def mode(self):
"""\
String indicating the mode ("numeric", "alphanumeric", "byte", "kanji",
or "hanzi").
May be ``None`` if multiple modes are used.
:rtype: str or None
"""
if self._mode is not None:
return encoder.get_mode_name(self._mode)
return None
@property
def designator(self):
"""\
Returns the version and error correction level as string `V-E` where
`V` represents the version number and `E` the error level.
:rtype: str
"""
version = str(self.version)
return '-'.join((version, self.error) if self.error else (version,))
@property
def default_border_size(self):
"""\
Indicates the default border size aka quiet zone.
QR Codes have a quiet zone of four light modules, while Micro QR Codes
have a quiet zone of two light modules.
:rtype: int
"""
return utils.get_default_border_size(self._version)
@property
def is_micro(self):
"""\
Indicates if this QR code is a Micro QR code
:rtype: bool
"""
return self._version < 1
def __eq__(self, other):
return self.__class__ == other.__class__ and self.matrix == other.matrix
__hash__ = None
def symbol_size(self, scale=1, border=None):
"""\
Returns the symbol size (width x height) with the provided border and
scaling factor.
:param scale: Indicates the size of a single module (default: 1).
The size of a module depends on the used output format; i.e.
in a PNG context, a scaling factor of 2 indicates that a module
has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
floating point values.
:type scale: int or float
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:rtype: tuple (width, height)
"""
return utils.get_symbol_size(self._version, scale=scale, border=border)
def matrix_iter(self, scale=1, border=None, verbose=False):
"""\
Returns an iterator over the matrix which includes the border.
The border is returned as sequence of light modules.
Dark modules are reported as ``0x1``, light modules have the value
``0x0``.
The following example converts the QR code matrix into a list of
lists which use boolean values for the modules (True = dark module,
False = light module)::
>>> import segno
>>> qr = segno.make('The Beatles')
>>> width, height = qr.symbol_size(scale=2)
>>> res = []
>>> # Scaling factor 2, default border
>>> for row in qr.matrix_iter(scale=2):
>>> res.append([col == 0x1 for col in row])
>>> width == len(res[0])
True
>>> height == len(res)
True
If `verbose` is ``True``, the iterator returns integer constants which
indicate the type of the module, i.e. ``segno.consts.TYPE_FINDER_PATTERN_DARK``,
``segno.consts.TYPE_FINDER_PATTERN_LIGHT``, ``segno.consts.TYPE_QUIET_ZONE`` etc.
To check if the returned module type is dark or light, use::
if mt >> 8:
print('dark module')
if not mt >> 8:
print('light module')
:param int scale: The scaling factor (default: ``1``).
:param int border: The size of border / quiet zone or ``None`` to
indicate the default border.
:param bool verbose: Indicates if the type of the module should be returned
instead of ``0x1`` and ``0x0`` values.
See :py:mod:`segno.consts` for the return values.
This feature is currently in EXPERIMENTAL state.
:raises: :py:exc:`ValueError` if the scaling factor or the border is
invalid (i.e. negative).
"""
iterfn = utils.matrix_iter_verbose if verbose else utils.matrix_iter
return iterfn(self.matrix, self._version, scale, border)
def show(self, delete_after=20, scale=10, border=None, dark='#000',
light='#fff'): # pragma: no cover
"""\
Displays this QR code.
This method is mainly intended for debugging purposes.
This method saves the QR code as an image (by default with a scaling
factor of 10) to a temporary file and opens it with the standard PNG
viewer application or within the standard webbrowser.
The temporary file is deleted afterwards (unless
:paramref:`delete_after <segno.QRCode.show.delete_after>` is set to ``None``).
If this method does not show any result, try to increase the
:paramref:`delete_after <segno.QRCode.show.delete_after>` value or set
it to ``None``
:param delete_after: Time in seconds to wait till the temporary file is
deleted.
:type delete_after: int or None
:param int scale: Integer indicating the size of a single module.
:param border: Integer indicating the size of the quiet zone.
If set to ``None`` (default), the recommended border size
will be used.
:type border: int or None
:param dark: The color of the dark modules (default: black).
:param light: The color of the light modules (default: white).
"""
import os
import time
import tempfile
import webbrowser
import threading
try: # Python 3
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError: # Python 2
from urlparse import urljoin # noqa
from urllib import pathname2url # noqa
def delete_file(name):
time.sleep(delete_after)
try:
os.unlink(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile('wb', suffix='.png', delete=False)
try:
self.save(f, scale=scale, dark=dark, light=light, border=border)
except: # noqa: E722
f.close()
os.unlink(f.name)
raise
f.close()
webbrowser.open_new_tab(urljoin('file:', pathname2url(f.name)))
if delete_after is not None:
t = threading.Thread(target=delete_file, args=(f.name,))
t.start()
def svg_data_uri(self, xmldecl=False, encode_minimal=False,
omit_charset=False, nl=False, **kw):
"""\
Converts the QR code into a SVG data URI.
The XML declaration is omitted by default (set
:paramref:`xmldecl <segno.QRCode.svg_data_uri.xmldecl>` to ``True``
to enable it), further the newline is omitted by default (set ``nl`` to
``True`` to enable it).
Aside from the missing `out` parameter, the different `xmldecl` and
`nl` default values, and the additional parameters
:paramref:`encode_minimal <segno.QRCode.svg_data_uri.encode_minimal>`
and :paramref:`omit_charset <segno.QRCode.svg_data_uri.omit_charset>`,
this method uses the same parameters as the usual SVG serializer, see
:py:func:`save` and the available `SVG parameters <#svg>`_
.. note::
In order to embed a SVG image in HTML without generating a file, the
:py:func:`svg_inline` method could serve better results, as it
usually produces a smaller output.
:param bool xmldecl: Indicates if the XML declaration should be
serialized (default: ``False``)
:param bool encode_minimal: Indicates if the resulting data URI should
use minimal percent encoding (disabled by default).
:param bool omit_charset: Indicates if the ``;charset=...`` should be omitted
(disabled by default)
:param bool nl: Indicates if the document should have a trailing newline
(default: ``False``)
:rtype: str
"""
return writers.as_svg_data_uri(self.matrix, self._version,
xmldecl=xmldecl, nl=nl,
encode_minimal=encode_minimal,
omit_charset=omit_charset, **kw)
def svg_inline(self, **kw):
"""\
Returns a SVG representation which is embeddable into HTML5 contexts.
Due to the fact that HTML5 directly supports SVG, various elements of
a SVG document can or should be suppressed (i.e. the XML declaration and
the SVG namespace).
This method returns a string that can be used in an HTML context.
This method uses the same parameters as the usual SVG serializer, see
:py:func:`save` and the available `SVG parameters <#svg>`_
The returned string can be used directly in Jinja / Django templates,
provided the ``safe`` filter is used::
<div>{{ qr.svg_inline(dark='#228b22', scale=3) | safe }}</div>
:rtype: str
"""
buff = io.BytesIO()
self.save(buff, kind='svg', xmldecl=False, svgns=False, nl=False, **kw)
return buff.getvalue().decode(kw.get('encoding', 'utf-8'))
def png_data_uri(self, **kw):
"""\
Converts the QR code into a PNG data URI.
Uses the same keyword parameters as the usual PNG serializer,
see :py:func:`save` and the available `PNG parameters <#png>`_
:rtype: str
"""
return writers.as_png_data_uri(self.matrix, self._version, **kw)
def terminal(self, out=None, border=None, compact=False):
"""\
Serializes the matrix as ANSI escape code.
Under Windows, no ANSI escape sequence is generated but the Windows
API is used *unless* :paramref:`out <segno.QRCode.terminal.out>`
is a writable object or using WinAPI fails.
:param out: Filename or a file-like object supporting to write text.
If ``None`` (default), the matrix is written to :py:class:`sys.stdout`.
:param int border: Integer indicating the size of the quiet zone.
If set to ``None`` (default), the recommended border size
will be used (``4`` for QR Codes, ``2`` for Micro QR Codes).
"""
if out is None and sys.platform == 'win32': # pragma: no cover
# Windows < 10 does not support ANSI escape sequences, try to
# call the a Windows specific terminal output which uses the
# Windows API.
try:
writers.write_terminal_win(self.matrix, self._version, border)
except OSError:
# Use the standard output even if it may print garbage
writers.write_terminal(self.matrix, self._version, sys.stdout,
border)
elif compact:
writers.write_terminal_compact(self.matrix, self._version, out or sys.stdout,
border)
else:
writers.write_terminal(self.matrix, self._version, out or sys.stdout,
border)
def save(self, out, kind=None, **kw):
"""\
Serializes the QR code in one of the supported formats.
The serialization format depends on the filename extension.
.. _common_keywords:
**Common keywords**
========== ==============================================================
Name Description
========== ==============================================================
scale Integer or float indicating the size of a single module.
Default: 1. The interpretation of the scaling factor depends
on the serializer. For pixel-based output (like :ref:`PNG <png>`)
                   the scaling factor is interpreted as pixel-size (1 = 1 pixel).
:ref:`EPS <eps>` interprets ``1`` as 1 point (1/72 inch) per
module.
Some serializers (like :ref:`SVG <svg>`) accept float values.
If the serializer does not accept float values, the value will be
converted to an integer value (note: int(1.6) == 1).
border Integer indicating the size of the quiet zone.
If set to ``None`` (default), the recommended border size
will be used (``4`` for QR codes, ``2`` for a Micro QR codes).
A value of ``0`` indicates that border should be omitted.
dark A string or tuple representing a color value for the dark
modules. The default value is "black". The color can be
provided as ``(R, G, B)`` tuple, as web color name
(like "red") or in hexadecimal format (``#RGB`` or
``#RRGGBB``). Some serializers (i.e. :ref:`SVG <svg>` and
:ref:`PNG <png>`) accept an alpha transparency value like
``#RRGGBBAA``.
light A string or tuple representing a color for the light modules.
See `dark` for valid values.
The default value depends on the serializer. :ref:`SVG <svg>`
uses no color (``None``) for light modules by default, other
serializers, like :ref:`PNG <png>`, use "white" as default
light color.
========== ==============================================================
.. _module_colors:
**Module Colors**
=============== =======================================================
Name Description
=============== =======================================================
finder_dark Color of the dark modules of the finder patterns
Default: undefined, use value of "dark"
finder_light Color of the light modules of the finder patterns
Default: undefined, use value of "light"
data_dark Color of the dark data modules
Default: undefined, use value of "dark"
data_light Color of the light data modules.
Default: undefined, use value of "light".
version_dark Color of the dark modules of the version information.
Default: undefined, use value of "dark".
version_light Color of the light modules of the version information,
Default: undefined, use value of "light".
format_dark Color of the dark modules of the format information.
Default: undefined, use value of "dark".
format_light Color of the light modules of the format information.
Default: undefined, use value of "light".
alignment_dark Color of the dark modules of the alignment patterns.
Default: undefined, use value of "dark".
alignment_light Color of the light modules of the alignment patterns.
Default: undefined, use value of "light".
timing_dark Color of the dark modules of the timing patterns.
Default: undefined, use value of "dark".
timing_light Color of the light modules of the timing patterns.
Default: undefined, use value of "light".
separator Color of the separator.
Default: undefined, use value of "light".
dark_module Color of the dark module (a single dark module which
                        occurs in all QR Codes but not in Micro QR Codes).
Default: undefined, use value of "dark".
quiet_zone Color of the quiet zone / border.
Default: undefined, use value of "light".
=============== =======================================================
.. _svg:
**Scalable Vector Graphics (SVG)**
All :ref:`common keywords <common_keywords>` and :ref:`module colors <module_colors>`
are supported.
================ ==============================================================
Name Description
================ ==============================================================
out Filename or :py:class:`io.BytesIO`
kind "svg" or "svgz" (to create a gzip compressed SVG)
scale integer or float
dark Default: "#000" (black)
``None`` is a valid value. If set to ``None``, the resulting
path won't have a "stroke" attribute. The "stroke" attribute
may be defined via CSS (external).
If an alpha channel is defined, the output depends on the
SVG version used. For SVG versions >= 2.0, the "stroke"
attribute will have a value like "rgba(R, G, B, A)", otherwise
the path gets another attribute "stroke-opacity" to emulate
the alpha channel.
To minimize the document size, the SVG serializer automatically
uses the shortest color representation: If
a value like "#000000" is provided, the resulting
document will have a color value of "#000". If the color
is "#FF0000", the resulting color is not "#F00", but
the web color name "red".
light Default value ``None``. If this parameter is set to another
value, the resulting image will have another path which
is used to define the color of the light modules.
If an alpha channel is used, the resulting path may
have a "fill-opacity" attribute (for SVG version < 2.0)
or the "fill" attribute has a "rgba(R, G, B, A)" value.
xmldecl Boolean value (default: ``True``) indicating whether the
document should have an XML declaration header.
Set to ``False`` to omit the header.
svgns Boolean value (default: ``True``) indicating whether the
document should have an explicit SVG namespace declaration.
Set to ``False`` to omit the namespace declaration.
The latter might be useful if the document should be
embedded into an HTML5 document where the SVG namespace
is implicitly defined.
title String (default: ``None``) Optional title of the generated
SVG document.
desc String (default: ``None``) Optional description of the
generated SVG document.
svgid A string indicating the ID of the SVG document
(if set to ``None`` (default), the SVG element won't have
an ID).
svgclass Default: "segno". The CSS class of the SVG document
(if set to ``None``, the SVG element won't have a class).
lineclass Default: "qrline". The CSS class of the path element
which draws the dark modules (if set to ``None``, the path
won't have a class).
omitsize Indicates if width and height attributes should be
omitted (default: ``False``). If these attributes are
omitted, a ``viewBox`` attribute will be added to the
document.
unit Default: ``None``
Indicates the unit for width / height and other coordinates.
By default, the unit is unspecified and all values are
in the user space.
Valid values: em, ex, px, pt, pc, cm, mm, in, and percentages
(any string is accepted, this parameter is not validated
by the serializer)
encoding Encoding of the XML document. "utf-8" by default.
svgversion SVG version (default: ``None``). If specified (a float),
the resulting document has an explicit "version" attribute.
If set to ``None``, the document won't have a "version"
attribute. This parameter is not validated.
compresslevel Default: 9. This parameter is only valid, if a compressed
SVG document should be created (file extension "svgz").
1 is fastest and produces the least compression, 9 is slowest
and produces the most. 0 is no compression.
draw_transparent Indicates if transparent SVG paths should be
added to the graphic (default: ``False``)
nl Indicates if the document should have a trailing newline
(default: ``True``)
================ ==============================================================
.. _png:
**Portable Network Graphics (PNG)**
This writes either a grayscale (possibly with transparency) PNG (color type 0)
or a palette-based (possibly with transparency) image (color type 3).
If the dark / light values are ``None``, white or black, the serializer
chooses the more compact grayscale mode; in all other cases a palette-based
image is written.
All :ref:`common keywords <common_keywords>` and :ref:`module colors <module_colors>`
are supported.
=============== ==============================================================
Name Description
=============== ==============================================================
out Filename or :py:class:`io.BytesIO`
kind "png"
scale integer
dark Default: "#000" (black)
``None`` is a valid value iff light is not ``None``.
If set to ``None``, the dark modules become transparent.
light Default value "#fff" (white)
See keyword "dark" for further details.
compresslevel Default: 9. Integer indicating the compression level
for the ``IDAT`` (data) chunk.
1 is fastest and produces the least compression, 9 is slowest
and produces the most. 0 is no compression.
dpi Default: ``None``. Specifies the DPI value for the image.
By default, the DPI value is unspecified. Please note
that the DPI value is converted into meters (maybe with
rounding errors) since PNG does not support the unit
"dots per inch".
=============== ==============================================================
.. _eps:
**Encapsulated PostScript (EPS)**
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.StringIO`
kind "eps"
scale integer or float
dark Default: "#000" (black)
light Default value: ``None`` (transparent light modules)
============= ==============================================================
.. _pdf:
**Portable Document Format (PDF)**
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.BytesIO`
kind "pdf"
scale integer or float
dark Default: "#000" (black)
light Default value: ``None`` (transparent light modules)
compresslevel Default: 9. Integer indicating the compression level.
1 is fastest and produces the least compression, 9 is slowest
and produces the most. 0 is no compression.
============= ==============================================================
.. _txt:
**Text (TXT)**
Aside of "scale", all :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.StringIO`
kind "txt"
dark Default: "1"
light Default: "0"
============= ==============================================================
.. _ansi:
**ANSI escape code**
Supports the "border" keyword, only!
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.StringIO`
kind "ans"
============= ==============================================================
.. _pbm:
**Portable Bitmap (PBM)**
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.BytesIO`
kind "pbm"
scale integer
plain Default: False. Boolean to switch between the P4 and P1 format.
If set to ``True``, the (outdated) P1 serialization format is
used.
============= ==============================================================
.. _pam:
**Portable Arbitrary Map (PAM)**
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.BytesIO`
kind "pam"
scale integer
dark Default: "#000" (black).
light Default value "#fff" (white). Use ``None`` for transparent
light modules.
============= ==============================================================
.. _ppm:
**Portable Pixmap (PPM)**
All :ref:`common keywords <common_keywords>` and :ref:`module colors <module_colors>`
are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.BytesIO`
kind "ppm"
scale integer
dark Default: "#000" (black).
light Default value "#fff" (white).
============= ==============================================================
.. _latex:
**LaTeX / PGF/TikZ**
To use the output of this serializer, the ``PGF/TikZ`` (and optionally
``hyperref``) package is required in the LaTeX environment. The
serializer itself does not depend on any external packages.
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.StringIO`
kind "tex"
scale integer or float
dark LaTeX color name (default: "black"). The color is written
"at it is", please ensure that the color is a standard color
or it has been defined in the enclosing LaTeX document.
url Default: ``None``. Optional URL where the QR code should
point to. Requires the ``hyperref`` package in the LaTeX
environment.
============= ==============================================================
.. _xbm:
**X BitMap (XBM)**
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.StringIO`
kind "xbm"
scale integer
name Name of the variable (default: "img")
============= ==============================================================
.. _xpm:
**X PixMap (XPM)**
All :ref:`common keywords <common_keywords>` are supported.
============= ==============================================================
Name Description
============= ==============================================================
out Filename or :py:class:`io.StringIO`
kind "xpm"
scale integer
dark Default: "#000" (black).
light Default value "#fff" (white)
``None`` indicates transparent light modules.
name Name of the variable (default: "img")
============= ==============================================================
:param out: A filename or a writable file-like object with a
``name`` attribute. Use the :paramref:`kind <segno.QRCode.save.kind>`
parameter if `out` is a :py:class:`io.BytesIO` or
:py:class:`io.StringIO` stream, which does not have a ``name``
attribute.
:param str kind: Default ``None``.
If the desired output format cannot be determined from
the :paramref:`out <segno.QRCode.save.out>` parameter, this
parameter can be used to indicate the serialization format
(i.e. "svg" to enforce SVG output). The value is case
insensitive.
:param kw: Any of the supported keywords by the specific serializer.
"""
writers.save(self.matrix, self._version, out, kind, **kw)
def __getattr__(self, name):
"""\
This is used to plug in external serializers.
When a "to_<name>" method is invoked, this method tries to find
a ``segno.plugin.converter`` plugin with the provided ``<name>``.
If such a plugin exists, a callable function is returned. The result
of invoking the function depends on the plugin.
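Hypothetical example (the package, module and function names are
assumptions for illustration, not part of Segno): a third-party package
could register such a converter via setuptools entry points::

    from setuptools import setup

    setup(
        name='segno-xyz',
        py_modules=['segno_xyz'],
        entry_points={
            'segno.plugin.converter': [
                'xyz = segno_xyz:write_xyz',
            ],
        },
    )

The plugin callable receives this :py:class:`QRCode` instance as its
first argument, so ``qrcode.to_xyz(...)`` would then dispatch to
``segno_xyz.write_xyz(qrcode, ...)``.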
"""
if name.startswith('to_'):
from pkg_resources import iter_entry_points
from functools import partial
for ep in iter_entry_points(group='segno.plugin.converter',
name=name[3:]):
plugin = ep.load()
return partial(plugin, self)
raise AttributeError('{0} object has no attribute {1}'
.format(self.__class__, name))
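# Hedged usage sketch (not part of the original module): illustrates how the
# keyword arguments documented in QRCode.save() above are typically passed.
# The payload and file names are assumptions chosen for illustration only.
def _example_save_usage():  # pragma: no cover - illustrative only
    import io
    import segno
    qrcode = segno.make('Hello world')
    # Pixel-based output: 10 pixels per module, dark blue modules, no quiet zone.
    qrcode.save('hello.png', scale=10, dark='darkblue', border=0)
    # SVG without an XML declaration; stroke colour left to external CSS.
    qrcode.save('hello.svg', xmldecl=False, dark=None)
    # Writing to a stream without a name requires the ``kind`` parameter.
    buff = io.BytesIO()
    qrcode.save(buff, kind='png', scale=5)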
class QRCodeSequence(tuple):
"""\
Represents a sequence of 1 .. n (max. n = 16) :py:class:`QRCode` instances.
Iff this sequence contains only one item, it behaves like :py:class:`QRCode`.
"""
__slots__ = ()
def __new__(cls, qrcodes):
return super(QRCodeSequence, cls).__new__(cls, qrcodes)
def terminal(self, out=None, border=None):
"""\
Serializes the sequence of QR codes as ANSI escape code.
See :py:meth:`QRCode.terminal()` for details.
"""
for qrcode in self:
qrcode.terminal(out=out, border=border)
def save(self, out, kind=None, **kw):
"""\
Saves the sequence of QR codes to `out`.
If `out` is a filename, this method modifies the filename and adds
``<Number of QR codes>-<Current QR code>`` to it.
``structured-append.svg`` becomes (if the sequence contains two QR codes):
``structured-append-02-01.svg`` and ``structured-append-02-02.svg``
Please note that using a file or file-like object may result in an
invalid serialization format since all QR codes are written to the same
output.
See :py:meth:`QRCode.save()` for a detailed enumeration of options.
"""
filename = lambda o, n: o # noqa: E731
m = len(self)
if m > 1 and isinstance(out, str_type):
dot_idx = out.rfind('.')
if dot_idx > -1:
out = out[:dot_idx] + '-{0:02d}-{1:02d}' + out[dot_idx:]
filename = lambda o, n: o.format(m, n) # noqa: E731
for n, qrcode in enumerate(self, start=1):
qrcode.save(filename(out, n), kind=kind, **kw)
def __getattr__(self, item):
"""\
Behaves like :py:class:`QRCode` iff this sequence contains a single item.
"""
if len(self) == 1:
return getattr(self[0], item)
raise AttributeError("{0} object has no attribute '{1}'"
.format(self.__class__, item))
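# Hedged sketch (an assumption, not part of the original module): shows how a
# Structured Append sequence is typically created and saved so that the
# ``-<total>-<index>`` suffix described in QRCodeSequence.save() is applied.
def _example_sequence_save():  # pragma: no cover - illustrative only
    import segno
    seq = segno.make_sequence('Some content that spans several symbols',
                              symbol_count=2)
    # Writes structured-append-02-01.svg and structured-append-02-02.svg
    seq.save('structured-append.svg', scale=10)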
|
test_context.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import gc
import logging
from multiprocessing import Process
import os
import random
import sys
import time
from shellbot import Context
class ContextTests(unittest.TestCase):
def setUp(self):
self.context = Context()
def tearDown(self):
del self.context
collected = gc.collect()
if collected:
logging.info("Garbage collector: collected %d objects." % (collected))
def test_init(self):
settings = {
'bot': {'name': 'testy', 'version': '17.4.1'},
}
self.context = Context(settings)
self.assertEqual(self.context.get('bot.name'), 'testy')
self.assertEqual(self.context.get('bot.version'), '17.4.1')
def test_init_filter(self):
self.context = Context(filter=lambda x, y : x + '...')
self.context.apply({'my.var': 'my value'})
self.context.check('my.var', filter=True)
self.assertEqual(self.context.get('my.var'), 'my value...')
def test_apply(self):
self.assertEqual(self.context.get('port'), None)
settings = {
'spark': {'CISCO_SPARK_BTTN_BOT': 'who_knows'},
'spark.room': 'title',
'DEBUG': True,
'server': {'port': 80, 'url': 'http://www.acme.com/'},
'bot.store': {'planets': ['Uranus', 'Mercury']},
}
self.context.apply(settings)
self.assertEqual(self.context.get('DEBUG'), True)
self.assertEqual(self.context.get('spark.CISCO_SPARK_BTTN_BOT'),
'who_knows')
self.assertEqual(self.context.get('spark.room'), 'title')
self.assertEqual(self.context.get('server.port'), 80)
self.assertEqual(self.context.get('server.url'),
'http://www.acme.com/')
self.assertEqual(self.context.get('bot.store.planets'),
['Uranus', 'Mercury'])
self.assertEqual(self.context.get('bot.store'),
{'planets': ['Uranus', 'Mercury']})
def test_clear(self):
self.assertEqual(self.context.get('port'), None)
settings = {
'spark': {'CISCO_SPARK_BTTN_BOT': 'who_knows'},
'spark.room': 'title',
'DEBUG': True,
'server': {'port': 80, 'url': 'http://www.acme.com/'},
}
self.context.apply(settings)
self.assertEqual(self.context.get('DEBUG'), True)
self.assertEqual(self.context.get('spark.CISCO_SPARK_BTTN_BOT'),
'who_knows')
self.assertEqual(self.context.get('spark.room'), 'title')
self.assertEqual(self.context.get('server.port'), 80)
self.assertEqual(self.context.get('server.url'), 'http://www.acme.com/')
self.context.clear()
self.assertEqual(self.context.get('DEBUG'), None)
self.assertEqual(self.context.get('spark.CISCO_SPARK_BTTN_BOT'), None)
self.assertEqual(self.context.get('spark.room'), None)
self.assertEqual(self.context.get('server.port'), None)
self.assertEqual(self.context.get('server.url'), None)
def test_is_empty(self):
self.assertTrue(self.context.is_empty)
# set a key
self.context.set('hello', 'world')
self.assertEqual(self.context.get('hello'), 'world')
self.assertFalse(self.context.is_empty)
self.context.clear()
self.assertTrue(self.context.is_empty)
settings = {
'spark': {'CISCO_SPARK_BTTN_BOT': 'who_knows'},
'spark.room': 'title',
'DEBUG': True,
'server': {'port': 80, 'url': 'http://www.acme.com/'},
}
self.context.apply(settings)
self.assertFalse(self.context.is_empty)
def test_check(self):
self.assertEqual(self.context.get('spark.room'), None)
settings = {
'spark': {
'room': 'My preferred channel',
'participants':
['alan.droit@azerty.org', 'bob.nard@support.tv'],
'team': 'Anchor team',
'token': 'hkNWEtMJNkODk3ZDZLOGQ0OVGlZWU1NmYtyY',
'weird_token': '$MY_FUZZY_SPARK_TOKEN',
'fuzzy_token': '$MY_FUZZY_SPARK_TOKEN',
'webhook': "http://73a1e282.ngrok.io",
}
}
self.context.apply(settings)
self.context.check('spark.room', is_mandatory=True)
self.assertEqual(self.context.get('spark.room'), 'My preferred channel')
self.context.check('spark.team')
self.assertEqual(self.context.get('spark.team'), 'Anchor team')
self.context.check('spark.*not*present') # will be set to None
self.assertEqual(self.context.get('spark.*not*present'), None)
self.context.check('spark.absent_list', default=[])
self.assertEqual(self.context.get('spark.absent_list'), [])
self.context.check('spark.absent_dict', default={})
self.assertEqual(self.context.get('spark.absent_dict'), {})
self.context.check('spark.absent_text', default='*born')
self.assertEqual(self.context.get('spark.absent_text'), '*born')
# is_mandatory is useless if default is set
self.context.check('spark.*not*present',
default='*born',
is_mandatory=True)
self.assertEqual(self.context.get('spark.*not*present'), '*born')
# missing key
self.assertEqual(self.context.get('spark.*unknown*key*'), None)
# we need the missing key
with self.assertRaises(KeyError):
self.context.check('spark.*unknown*key*',
is_mandatory=True)
# validate implies is_mandatory
with self.assertRaises(KeyError):
self.context.check('spark.*unknown*key*',
validate=lambda line: True)
# exception when is_mandatory is explicit
with self.assertRaises(KeyError):
self.context.check('spark.*unknown*key*',
is_mandatory=True,
filter=True)
# yet filter does not imply is_mandatory by itself
self.context.check('spark.*unknown*key*',
filter=True) # warning in log
# a web link has been set
self.assertEqual(self.context.get('spark.webhook'),
"http://73a1e282.ngrok.io")
# validate http
self.context.check('spark.webhook',
validate=lambda line: line.startswith('http'))
# validate https
with self.assertRaises(ValueError):
self.context.check('spark.webhook',
validate=lambda line: line.startswith('https'))
# a token has been set
self.assertEqual(self.context.get('spark.token'),
'hkNWEtMJNkODk3ZDZLOGQ0OVGlZWU1NmYtyY')
# validate length of token
with self.assertRaises(ValueError):
self.context.check('spark.token',
validate=lambda line: len(line) == 32)
# we rely on the environment for this key
self.assertEqual(self.context.get('spark.weird_token'),
'$MY_FUZZY_SPARK_TOKEN')
# no change to the value
self.context.check('spark.weird_token')
# lookup the environment and change the value to None
self.context.check('spark.weird_token', filter=True) # warning in log
self.assertEqual(self.context.get('spark.weird_token'), None)
# ensure the environment is clean
def clear_env(name):
try:
os.environ.pop(name)
except KeyError:
pass
clear_env('MY_FUZZY_SPARK_TOKEN')
# a value based on the environment
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
self.context.check('spark.fuzzy_token')
self.assertEqual(self.context.get('spark.fuzzy_token'),
'$MY_FUZZY_SPARK_TOKEN')
# default has no effect, mandatory is ok
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
self.context.check('spark.fuzzy_token', default='hello there')
self.context.check('spark.fuzzy_token', is_mandatory=True)
self.assertEqual(self.context.get('spark.fuzzy_token'),
'$MY_FUZZY_SPARK_TOKEN')
# default value is used if key is absent from the environment
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
self.context.check('spark.fuzzy_token', default='hello there', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), 'hello there')
# is_mandatory is useless in that case
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
self.context.check('spark.fuzzy_token', is_mandatory=True, filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), None)
# set the value to ''
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
os.environ['MY_FUZZY_SPARK_TOKEN'] = ''
self.context.check('spark.fuzzy_token', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), '')
# set the value to '' -- default value is useless in that case
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
os.environ['MY_FUZZY_SPARK_TOKEN'] = ''
self.context.check('spark.fuzzy_token', default='ok?', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), '')
# set the value to 'hello'
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
os.environ['MY_FUZZY_SPARK_TOKEN'] = 'hello'
self.context.check('spark.fuzzy_token', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), 'hello')
# set the value to 'hello' -- default value is useless in that case
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
os.environ['MY_FUZZY_SPARK_TOKEN'] = 'hello again'
self.context.check('spark.fuzzy_token', default='ok?', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), 'hello again')
# pass the variable name as default value
self.context.set('spark.fuzzy_token', None)
os.environ['MY_FUZZY_SPARK_TOKEN'] = 'hello'
self.context.check('spark.fuzzy_token', default='$MY_FUZZY_SPARK_TOKEN', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), 'hello')
# pass the variable name as default value -- no effect
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
os.environ['MY_FUZZY_SPARK_TOKEN'] = 'hello'
self.context.check('spark.fuzzy_token', default='$MY_FUZZY_SPARK_TOKEN', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), 'hello')
# pass as default the name of an empty variable -- tricky case
self.context.set('spark.fuzzy_token', '$MY_FUZZY_SPARK_TOKEN')
clear_env('MY_FUZZY_SPARK_TOKEN')
self.context.check('spark.fuzzy_token', default='$MY_FUZZY_SPARK_TOKEN', filter=True)
self.assertEqual(self.context.get('spark.fuzzy_token'), None)
def test__filter(self):
self.assertEqual(Context._filter(None), None)
self.assertEqual(Context._filter(''), '')
self.assertEqual(Context._filter('ZLOGQ0OVGlZWU1NmYtyY'),
'ZLOGQ0OVGlZWU1NmYtyY')
if os.environ.get('PATH') is not None:
self.assertTrue(Context._filter('$PATH') != '$PATH')
Context._filter('$TOTALLY*UNKNOWN*HERE') # warning in log
def test_has(self):
self.context.apply({
'spark': {
'room': 'My preferred channel',
'participants':
['alan.droit@azerty.org', 'bob.nard@support.tv'],
'team': 'Anchor team',
'token': 'hkNWEtMJNkODk3ZDZLOGQ0OVGlZWU1NmYtyY',
'fuzzy_token': '$MY_FUZZY_SPARK_TOKEN',
'webhook': "http://73a1e282.ngrok.io",
}
})
# undefined prefix
self.assertFalse(self.context.has('hello'))
# top-level prefix
self.assertTrue(self.context.has('spark'))
# 2-level prefix
self.assertTrue(self.context.has('spark.team'))
# undefined 2-level prefix
self.assertFalse(self.context.has('.token'))
def test_getter(self):
# undefined key
self.assertEqual(self.context.get('hello'), None)
# undefined key with default value
whatever = 'whatever'
self.assertEqual(self.context.get('hello', whatever), whatever)
# set a key
self.context.set('hello', 'world')
self.assertEqual(self.context.get('hello'), 'world')
# default value is meaningless when key has been set
self.assertEqual(self.context.get('hello', 'whatever'), 'world')
# except when set to None
self.context.set('special', None)
self.assertEqual(self.context.get('special', []), [])
def test_unicode(self):
self.context.set('hello', 'world')
self.assertEqual(self.context.get('hello'), 'world')
self.assertEqual(self.context.get(u'hello'), 'world')
self.context.set('hello', u'wôrld')
self.assertEqual(self.context.get('hello'), u'wôrld')
self.context.set(u'hello', u'wôrld')
self.assertEqual(self.context.get(u'hello'), u'wôrld')
def test_increment(self):
self.assertEqual(self.context.get('gauge'), None)
value = self.context.increment('gauge')
self.assertEqual(value, 1)
self.context.set('gauge', 'world')
self.assertEqual(self.context.get('gauge'), 'world')
value = self.context.increment('gauge')
self.assertEqual(value, 1)
def test_decrement(self):
self.assertEqual(self.context.get('gauge'), None)
value = self.context.decrement('gauge')
self.assertEqual(value, -1)
self.context.set('gauge', 'world')
self.assertEqual(self.context.get('gauge'), 'world')
value = self.context.decrement('gauge')
self.assertEqual(value, -1)
def test_gauge(self):
# undefined key
self.assertEqual(self.context.get('gauge'), None)
# see if type mismatch would create an error
self.context.set('gauge', 'world')
self.assertEqual(self.context.get('gauge'), 'world')
# increment and decrement the counter
value = self.context.increment('gauge')
self.assertEqual(value, 1)
self.assertEqual(self.context.get('gauge'), 1)
self.assertEqual(self.context.decrement('gauge', 2), -1)
self.assertEqual(self.context.increment('gauge', 4), 3)
self.assertEqual(self.context.decrement('gauge', 10), -7)
self.assertEqual(self.context.increment('gauge', 27), 20)
self.assertEqual(self.context.get('gauge'), 20)
# default value is meaningless when key has been set
self.assertEqual(self.context.get('gauge', 'world'), 20)
# reset the gauge
self.context.set('gauge', 123)
self.assertEqual(self.context.get('gauge'), 123)
def test_concurrency(self):
def worker(id, context):
for i in range(4):
r = random.random()
time.sleep(r)
value = context.increment('gauge')
logging.info('worker %d:counter=%d' % (id, value))
logging.info('worker %d:done' % id)
logging.info('Creating a counter')
self.counter = Context()
logging.info('Launching incrementing workers')
workers = []
for i in range(4):
p = Process(target=worker, args=(i, self.counter,))
p.start()
workers.append(p)
logging.info('Waiting for worker threads')
for p in workers:
p.join()
logging.info('Counter: %d' % self.counter.get('gauge'))
self.assertEqual(self.counter.get('gauge'), 16)
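# Hedged usage sketch (not part of the test suite): how the Context API
# exercised by the tests above is typically combined. The keys and values
# below are illustrative assumptions.
def example_context_usage():  # pragma: no cover - illustrative only
    context = Context({'server': {'port': 80}})
    context.check('server.url', default='http://www.acme.com/')
    value = context.increment('gauge')   # starts a counter at 1
    assert value == 1
    return context.get('server.port')    # returns 80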
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
|
__init__.py
|
# some worker's process function
from minibatch import connectdb, Stream, streaming
from minibatch.example.util import clean
def consumer():
# process window.data. maybe split processing in parallel... whatever
# @stream('test', size=2, emitter=SampleFunctionWindow)
# @stream('test', interval=5)
# @stream('test', interval=5, relaxed=False, keep=True)
@streaming('test', size=5, keep=True)
def myprocess(window):
try:
db = connectdb(alias='consumer')
print("consuming ... {}".format(window.data))
db.processed.insert_one({'data': window.data or {}})
except Exception as e:
print(e)
return window
# some producer
def producer(data):
import os
import time
import random
# sleep to simulate multiple time windows
# randrange(0, 1, 1) always returns 0, so sleep a genuinely random fraction of a second
time.sleep(random.random() / 10.0)
data.update({'pid': os.getpid()})
connectdb(alias='producer')
stream_name = 'test'
stream = Stream.get_or_create(stream_name)
print("producing ... {}".format(data))
stream.append(data)
def main():
from multiprocessing import Pool, Process
import time
clean()
emitp = Process(target=consumer)
emitp.start()
pool = Pool(4)
data = [{'value': i} for i in range(0, 100)]
pool.map(producer, data, 1)
time.sleep(5)
emitp.terminate()
db = connectdb()
print("processed items:")
print(list(doc for doc in db.processed.find()))
|
ionosphere.py
|
from __future__ import division
import logging
import os
from os import kill, getpid, listdir
from os.path import join, isfile
from sys import version_info
try:
from Queue import Empty
except:
from queue import Empty
from time import time, sleep
from threading import Thread
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager
from multiprocessing import Process
import re
from shutil import rmtree
# import csv
from ast import literal_eval
from datetime import datetime
from redis import StrictRedis
import traceback
import mysql.connector
# from mysql.connector import errorcode
from sqlalchemy.sql import select
# @added 20180715 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
from sqlalchemy.sql import desc
# @added 20161213 - Branch #1790: test_tsfresh
# To match the new order introduced via the test_tsfresh method
import numpy as np
# import pandas as pd
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
from tsfresh import __version__ as tsfresh_version
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
from pymemcache.client.base import Client as pymemcache_Client
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
import pandas as pd
from tsfresh.feature_extraction import (
extract_features, ReasonableFeatureExtractionSettings)
import settings
from skyline_functions import (
fail_check, mysql_select, write_data_to_file, send_graphite_metric,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# get_memcache_metric_object)
mkdir_p)
# @added 20161221 - calculate features for every anomaly, instead of making the
# user do it in the frontend or calling the webapp constantly in a cron like
# manner. Decouple Ionosphere from the webapp.
from features_profile import calculate_features_profile
# @modified 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched_meta
from database import (
get_engine, ionosphere_table_meta,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# metrics_table_meta,
ionosphere_matched_table_meta)
from tsfresh_feature_names import TSFRESH_FEATURES
# @added 20170114 - Feature #1854: Ionosphere learn
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# from learn import learn
from learn import ionosphere_learn
# @added 20170306 - Feature #1960: ionosphere_layers
from layers import run_layer_algorithms
# @added 20190322 - Feature #2484: FULL_DURATION feature profiles
from common_functions import (
get_metrics_db_object, get_calculated_features)
# @added 20190327 - Feature #2484
from echo import ionosphere_echo
skyline_app = 'ionosphere'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
ENABLE_IONOSPHERE_DEBUG = settings.ENABLE_IONOSPHERE_DEBUG
except:
logger.error('error :: cannot determine ENABLE_IONOSPHERE_DEBUG from settings')
ENABLE_IONOSPHERE_DEBUG = False
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
# @added 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# Number of processes to assign to Ionosphere, however Ionosphere should never
# need more than 1 and is effectively hard coded as such currently. This
# variable is only declared for the purpose of maintaining a standard set up in
# each module and to possibly enable more than one processor on Ionosphere in
# the future, should there be a requirement for Ionosphere to analyse the
# metrics quicker. Running Ionosphere with more than one process is untested
# and currently it is hard coded to be 1
# (https://github.com/earthgecko/skyline/issues/69)
try:
IONOSPHERE_PROCESSES = settings.IONOSPHERE_PROCESSES
if IONOSPHERE_PROCESSES != 1:
IONOSPHERE_PROCESSES = 1
except:
IONOSPHERE_PROCESSES = 1
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if the namespace has matched multiple times in the
# last 10 minutes. However, which Skyline related metrics are feeding
# back is quite difficult to ascertain. So use the ionosphere_busy logic
# again, use or find the Skyline host namespace, and do not analyse the
# Skyline host namespace while Ionosphere is busy.
try:
SKYLINE_FEEDBACK_NAMESPACES = settings.SKYLINE_FEEDBACK_NAMESPACES
except:
# Let us take a guess
try:
graphite_host = settings.GRAPHITE_HOST
graphite_hostname = graphite_host.split('.', -1)[0]
SKYLINE_FEEDBACK_NAMESPACES = [settings.SERVER_METRICS_NAME, graphite_hostname]
except:
SKYLINE_FEEDBACK_NAMESPACES = [this_host]
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
max_age_seconds = settings.IONOSPHERE_CHECK_MAX_AGE
# Database configuration
config = {'user': settings.PANORAMA_DBUSER,
'password': settings.PANORAMA_DBUSERPASS,
'host': settings.PANORAMA_DBHOST,
'port': settings.PANORAMA_DBPORT,
'database': settings.PANORAMA_DATABASE,
'raise_on_warnings': True}
failed_checks_dir = '%s_failed' % settings.IONOSPHERE_CHECK_PATH
last_purge_key = '%s.last_purge_ts' % skyline_app
LOCAL_DEBUG = False
class Ionosphere(Thread):
"""
The Ionosphere class which controls the ionosphere thread and spawned
processes.
"""
def __init__(self, parent_pid):
"""
Initialize Ionosphere
Define Redis and memcached connections
"""
super(Ionosphere, self).__init__()
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.mysql_conn = mysql.connector.connect(**config)
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Task #3032: Debug number of Python processes and memory use
# Branch #3002: docker
# Reduce amount of Manager instances that are used as each requires a
# copy of entire memory to be copied into each subprocess so this
# results in a python process per Manager instance, using as much
# memory as the parent. OK on a server, not so much in a container.
# Disabled all the Manager().list() below and replaced with Redis sets
# self.anomalous_metrics = Manager().list()
# self.not_anomalous = Manager().list()
# self.features_profiles_checked = Manager().list()
# self.training_metrics = Manager().list()
# self.sent_to_panorama = Manager().list()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
# self.ionosphere_smtp_alerter_metrics = Manager().list()
# self.ionosphere_non_smtp_alerter_metrics = Manager().list()
# @added 20170306 - Feature #1960: ionosphere_layers
# self.layers_checked = Manager().list()
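# Illustrative note (the Redis key below is a hypothetical example, not
# taken from this module): where a Manager().list() append was used
# previously, the equivalent Redis set operations used elsewhere in this
# class take the form:
#   self.redis_conn.sadd('ionosphere.anomalous_metrics', base_name)
#   list(self.redis_conn.smembers('ionosphere.anomalous_metrics'))
# which avoids one extra Python process (and a copy of the parent's
# memory) per Manager instance.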
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
self.memcache_client = pymemcache_Client((settings.MEMCACHED_SERVER_IP, settings.MEMCACHED_SERVER_PORT), connect_timeout=0.1, timeout=0.2)
else:
self.memcache_client = None
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
"""
These are the ionosphere mysql functions used to surface and input
ionosphere data for timeseries.
"""
def mysql_insert(self, insert):
"""
Insert data into mysql table
:param insert: the insert string
:type insert: str
:return: int
:rtype: int or boolean
- **Example usage**::
query = 'insert into host (host) VALUES (\'this_host\')'
result = self.mysql_insert(query)
.. note::
- If the MySQL query fails a boolean will be returned not a tuple
* ``False``
* ``None``
"""
try:
cnx = mysql.connector.connect(**config)
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to mysql')
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('error :: failed to connect to mysql')
raise
if cnx:
try:
cursor = cnx.cursor()
cursor.execute(insert)
inserted_id = cursor.lastrowid
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
return inserted_id
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('Failed to insert record')
cnx.close()
raise
else:
cnx.close()
return False
return False
def purge_old_data_dirs(self, dir_path, older_than):
time_now = time()
logger.info(
'Cleaning old training data from %s older than %s seconds' %
(dir_path, str(older_than)))
try:
for path, folders, files in os.walk(dir_path):
for folder in folders[:]:
folder_path = os.path.join(path, folder)
# Only timestamped directories are removed
if re.match(r'\d{10}', folder):
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: matched - %s' % folder_path)
if (time_now - os.path.getmtime(folder_path)) > older_than:
try:
rmtree(folder_path)
logger.info('removed - %s' % folder_path)
except:
logger.error('error :: failed to rmtree %s' % folder_path)
except:
logger.info(traceback.format_exc())
logger.error('error :: purge_old_data_dirs - os.walk')
last_purge_ts = int(time())
try:
self.redis_conn.setex(last_purge_key, 1800, last_purge_ts)
logger.info('updated Redis key for %s' % last_purge_key)
except:
logger.error('error :: failed to update Redis key for %s' % last_purge_key)
backup_purge_ts_file = '%s/last_purge_ts.txt' % (settings.IONOSPHERE_DATA_FOLDER)
try:
write_data_to_file(skyline_app, backup_purge_ts_file, 'w', last_purge_ts)
logger.info('updated the backup_purge_ts_file with %s' % str(last_purge_ts))
except:
logger.error('error :: failed to update the backup_purge_ts_file - %s' % backup_purge_ts_file)
return
def remove_metric_check_file(self, metric_check_file):
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info('metric_check_file removed - %s' % str(metric_check_file))
except OSError:
pass
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
def manage_ionosphere_unique_metrics(self):
"""
Create a Redis set of all Ionosphere enabled metrics.
:return: returns True
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
log_msg = 'error :: failed to get MySQL engine for manage_ionosphere_unique_metrics'
logger.error('%s' % log_msg)
return None, log_msg, trace
ionosphere_unique_metrics_count = 0
redis_ionosphere_unique_metrics = None
ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown, setting to []')
ionosphere_unique_metrics = []
manage_ionosphere_unique_metrics = True
manage_ionosphere_unique_metrics_key = []
try:
manage_ionosphere_unique_metrics_key = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
if LOCAL_DEBUG:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics key: %s' % str(e))
if manage_ionosphere_unique_metrics_key is not None:
manage_ionosphere_unique_metrics = False
logger.info('getting MySQL engine for ionosphere_enabled_metrics')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for ionosphere_enabled_metrics')
return False
if not engine:
logger.error('error :: MySQL engine not obtained for ionosphere_enabled_metrics')
return False
# Determine the metrics that have ionosphere_enabled
# @added 20170103 - Task #1658: Patterning Skyline Ionosphere
# TODO: We need 2 sets not just ionosphere.unique_metrics otherwise
# if a metric is switch from Analyzer to Mirage will send all
# matched anomalies to Ionosphere even if there is no features
# profile at the specified duration.
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
# @modified 20170108 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Yes those ^^ are needed, MySQL join?
ionosphere_enabled_metrics = []
ionosphere_metrics_count = 0
query_ok = False
try:
stmt = 'select metric from metrics where ionosphere_enabled=1'
connection = engine.connect()
for row in engine.execute(stmt):
metric_basename = row['metric']
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric_basename))
ionosphere_enabled_metrics.append(metric_name)
connection.close()
query_ok = True
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled metrics from the DB to manage ionosphere.unique_metrics Redis set')
ionosphere_metrics_count = len(ionosphere_enabled_metrics)
logger.info('db has %s ionosphere_enabled metrics' % (str(ionosphere_metrics_count)))
# @added 20190528 - Branch #3002: docker
if ionosphere_metrics_count == 0:
ionosphere_enabled_metrics = ['none']
if manage_ionosphere_unique_metrics:
# Testing the query was fine and Ionosphere metrics can go to 0 if
# all were disabled
if query_ok:
manage_ionosphere_unique_metrics = True
else:
manage_ionosphere_unique_metrics = False
if manage_ionosphere_unique_metrics:
for metric_name in ionosphere_enabled_metrics:
try:
self.redis_conn.sadd('ionosphere.new_unique_metrics', metric_name)
# logger.info('added %s to ionosphere.new_unique_metrics Redis set' % metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to ionosphere.new_unique_metrics Redis set' % metric_name)
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
try:
logger.info('replacing Redis ionosphere.unique_metrics via rename of ionosphere.new_unique_metrics')
self.redis_conn.rename('ionosphere.new_unique_metrics', 'ionosphere.unique_metrics')
manage_ionosphere_unique_metrics = False
ionosphere_unique_metrics = []
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
else:
logger.error('error :: could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
redis_ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('the new Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown, setting to []')
ionosphere_unique_metrics = []
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return True
# @added 20161230 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def new_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
float_keys = ['value']
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
int_keys = [
'from_timestamp', 'metric_timestamp', 'added_at', 'full_duration',
'ionosphere_parent_id']
array_keys = ['algorithms', 'triggered_algorithms']
boolean_keys = ['graphite_metric', 'run_crucible_tests']
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == 'True':
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
'error :: loading metric variables - none found in %s' % (
str(metric_vars_file)))
return False
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
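# Hedged illustration (the metric name and values below are assumptions,
# not taken from a real check file): a metric check variables file parsed
# by new_load_metric_vars() consists of simple ``key = value`` lines, e.g.
#   metric = 'stats.server-1.cpu.user'
#   value = '5.86'
#   from_timestamp = '1583890441'
#   metric_timestamp = '1583894041'
#   algorithms = ['histogram_bins', 'ks_test']
#   triggered_algorithms = ['histogram_bins']
#   added_by = 'mirage'
#   added_at = '1583894046'
#   full_duration = '604800'
#   ionosphere_parent_id = 0
# new_load_metric_vars() converts these into a list of [key, value] pairs,
# casting each value according to the string/float/int/array/boolean key
# lists defined above.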
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the spawn_learn_process after determining it is not fit to bolt learn
# onto ionosphere.py in its entirety; there is no point in more conditional
# nesting and bulking up ionosphere.py with more learn parameters to
# spin_process etc. ionosphere.py works, as good as it gets, so it was
# extended with learn.py. This uses
# the same no memory leak pattern that was adopted for smtp_alerts.
def spawn_learn_process(self, i, timestamp):
"""
Spawn a process to learn.
This is used for Ionosphere to learn whether anomalous metrics remain
anomalous over time as the resolution decreases. It follows the
multiprocessing methodology that was introduced in Analyzer and Mirage:
the process objects are cleared down and the learn processes cannot
create memory leaks, because each process always terminates or is
terminated, which prevents any memory leaks in the parent.
"""
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# learn(timestamp)
ionosphere_learn(timestamp)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
def process_ionosphere_echo(self, i, metric_check_file):
"""
Spawn a process_ionosphere_echo check to create features profiles at
settings.FULL_DURATION for Mirage metrics
:param i: python process id
:param metric_check_file: full path to the metric check file
:type i: object
:type metric_check_file: str
:return: boolean
:rtype: boolean
"""
try:
# Load and validate metric variables
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: process_ionosphere_echo :: failed to load metric variables from check file - %s' % (metric_check_file))
return
added_by = None
try:
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: process_ionosphere_echo failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
return
if added_by != 'mirage':
logger.info('process_ionosphere_echo :: only mirage metrics are processed not metrics added_by %s' % added_by)
return
metric = None
try:
# metric_vars.metric
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: process_ionosphere_echo failed to load metric variable from check file - %s' % (metric_check_file))
return
# @added 20190413 - Feature #2484: FULL_DURATION feature profiles
# Only process if it is an ionosphere enabled metric
try:
ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
ionosphere_unique_metrics = []
if ionosphere_unique_metrics:
# @modified 20190413 - Bug #2942: process_ionosphere_echo metric mismatch
# Feature #2484: FULL_DURATION feature profiles
# Matching bug for not in list comprehension it must be an absolute
# match
# if not metric in ionosphere_unique_metrics:
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric))
# @modified 20190522: Task #3034: Reduce multiprocessing Manager list usage
# if not metric_name in ionosphere_unique_metrics:
if metric_name not in ionosphere_unique_metrics:
logger.info('process_ionosphere_echo :: only ionosphere enabled metrics are processed, skipping %s' % metric)
return
full_duration = None
try:
# metric_vars.full_duration
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: process_ionosphere_echo failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
return
logger.info('process_ionosphere_echo :: processing - %s' % (metric))
ionosphere_echo(metric, full_duration)
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added the ionosphere_busy parameter
# def spin_process(self, i, metric_check_file):
def spin_process(self, i, metric_check_file, ionosphere_busy):
"""
Assign an anomalous metric to check against features profiles.
:param i: python process id
:param metric_check_file: full path to the metric check file
:param ionosphere_busy: whether Ionosphere should manage and alternate
between normal Ionosphere and echo analysis
:type i: object
:type metric_check_file: str
:type ionosphere_busy: boolean
:return: int
:rtype: int or boolean
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
trace = traceback.format_exc()
logger.error(trace)
log_msg = 'error :: failed to get MySQL engine in spin_process'
logger.error('error :: failed to get MySQL engine in spin_process')
return None, log_msg, trace
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return
child_process_pid = os.getpid()
logger.info('child_process_pid - %s' % str(child_process_pid))
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: processing metric check - %s' % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file)))
return
engine = None
anomalous_timeseries = False
check_file_name = os.path.basename(str(metric_check_file))
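# Hedged note (the example filename is an assumption): metric check files
# are named '<metric_timestamp>.<metric.name>.txt', for example
# '1583894041.stats.server-1.cpu.user.txt', so the first dot-separated
# element is the timestamp and the remainder, minus '.txt', is the metric
# name, as parsed below.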
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
failed_check_file = '%s/%s' % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: failed_check_file - %s' % failed_check_file)
# @added 20170307 - Feature #1960: ionosphere_layers - ionosphere_check_cache_key
# This Redis cache key check was added to prevent Ionosphere from
# running riot on checks if for some reason the check_file is not
# removed which happens if some exception is not handled as found out
# again during yesterday's development of run_layer_algorithms. It was
# a good reminder of how fast Skyline can iterate.
ionosphere_check_cache_key = 'ionosphere.check.%s' % check_file_name
check_done = False
try:
check_done = self.redis_conn.get(ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('check done check - no check cache key - %s' % ionosphere_check_cache_key)
else:
# @modified 20181113 - Task #2680: Remove Ionosphere check files is key exists
# This was here for initially debugging, no longer needed
# logger.error('error :: a check cache key exists - %s' % ionosphere_check_cache_key)
# logger.error('error :: failing check to prevent multiple iterations over this check')
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
logger.info('a check cache key exists - %s' % (ionosphere_check_cache_key))
logger.info('to prevent multiple iterations over this check removing %s' % (
str(metric_check_file)))
self.remove_metric_check_file(str(metric_check_file))
return
try:
check_process_start = int(time())
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# redis-py 3.x only accepts user data as bytes, strings or
# numbers (ints, longs and floats). All 2.X users should
# make sure that the keys and values they pass into redis-py
# are either bytes, strings or numbers. Added cache_key_value
# self.redis_conn.setex(
# ionosphere_check_cache_key, 300, [check_process_start])
self.redis_conn.setex(
ionosphere_check_cache_key, 300, check_process_start)
logger.info(
'added Redis check key - %s' % (ionosphere_check_cache_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis check key - %s' % (ionosphere_check_cache_key))
logger.error('error :: failing check to prevent multiple iterations over this check')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# Load and validate metric variables
# @modified 20161231 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test whether the variables are defined.
# This ensures that if any of the variables are not set for some reason,
# unexpected data or situations are handled gracefully and the process
# does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
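# NOTE: metric_vars_array is assumed to be a list of [key, value] pairs
# as returned by new_load_metric_vars, for example (illustrative values
# only): [['metric', 'stats.host-1.cpu.user'], ['value', 1.0],
# ['from_timestamp', 1554130460], ['metric_timestamp', 1554134060], ...]
# The list comprehension above simply selects the value for the given key.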
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
anomalous_value = value
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - value - %s' % str(value))
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
value = None
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: failed to load value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
from_timestamp = None
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = 'from_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
from_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - from_timestamp - %s' % str(from_timestamp))
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not from_timestamp:
logger.error('error :: failed to load from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric_timestamp - %s' % str(metric_timestamp))
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
metric_timestamp = None
if not metric_timestamp:
logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = 'algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms - %s' % str(algorithms))
except:
logger.error('error :: failed to read algorithms variable from check file, setting to all - %s' % (metric_check_file))
algorithms = 'all'
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - triggered_algorithms - %s' % str(triggered_algorithms))
except:
logger.error('error :: failed to read triggered_algorithms variable from check file, setting to all - %s' % (metric_check_file))
triggered_algorithms = 'all'
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170117 - Feature #1854: Ionosphere learn - generations
if str(added_by) == 'ionosphere_learn':
logger.info('debug :: metric variable - added_by - %s' % added_by)
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = 'added_at'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_at = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_at - %s' % str(added_at))
except:
logger.error('error :: failed to read added_at variable from check file, setting to metric_timestamp - %s' % (metric_check_file))
added_at = metric_timestamp
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
full_duration = None
try:
# metric_vars.full_duration
# full_duration = str(metric_vars.full_duration)
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = None
ionosphere_parent_id_determined = False
try:
key = 'ionosphere_parent_id'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
ionosphere_parent_id = int(value_list[0])
ionosphere_parent_id_determined = True
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - ionosphere_parent_id - %s' % str(ionosphere_parent_id))
except:
logger.error('error :: failed to read ionosphere_parent_id variable from check file - %s' % (metric_check_file))
ionosphere_parent_id = None
if not ionosphere_parent_id_determined:
logger.error('error :: failed to determine ionosphere_parent_id variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @modified 20170116 - Feature #1854: Ionosphere learn
# Do not check the cache key or anomaly age if added by ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20170101 - Feature #1830: Ionosphere alerts
# Remove check file is an alert key exists
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
logger.info('debug :: no alert cache key - %s' % cache_key)
else:
logger.info('debug :: removing check - alert cache key exists - %s' % cache_key)
self.remove_metric_check_file(str(metric_check_file))
return
now = time()
anomaly_age = int(now) - int(metric_timestamp)
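# For example (illustrative values only): with a metric_timestamp of
# 1554130460 and a now of 1554134060 the anomaly_age is 3600 seconds,
# which would be discarded if max_age_seconds were 300.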
if anomaly_age > max_age_seconds:
logger.info(
'Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds, discarding' % (
metric, str(anomaly_age), str(max_age_seconds)))
with open(metric_check_file, 'rt') as fr:
metric_check_file_contents = fr.readlines()
logger.info(
'debug :: metric check file contents\n%s' % (str(metric_check_file_contents)))
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info('processing check_file for ionosphere_learn - %s' % str(metric_check_file))
# @added 20161222 - Ionosphere should extract features for every anomaly
# check that is sent through and calculate a features profile ready for
# submission by the user if they so choose. Further, Ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies; the feature profiles for subsequent anomalies may be
# similar enough to match a few times, each a closer match to the next.
training_metric = False
metrics_id = None
metric_ionosphere_enabled = None
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the metrics_db_object so it is available to determine all
# the details of all features profiles for the metric; this has all
# the generations values available in it. Here we go! Learn!
metrics_db_object = None
# @modified 20190325 - Feature #2484: FULL_DURATION feature profiles
# Moved get_metrics_db_object block to common_functions.py
try:
metrics_db_object = get_metrics_db_object(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine metrics_db_object from get_metrics_db_object for %s' % base_name)
if metrics_db_object:
metrics_id = None
try:
metrics_id = int(metrics_db_object['id'])
except:
# @added 20190509 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# Added a traceback here to debug an issue
logger.error(traceback.format_exc())
logger.error('error :: could not determine id from metrics_db_object for %s' % base_name)
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
if metrics_id:
# @modified 20190510 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# metric_ionosphere_enabled = int(metrics_db_object['ionosphere_enabled'])
metric_ionosphere_enabled = None
try:
metric_ionosphere_enabled = int(metrics_db_object['ionosphere_enabled'])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled from metrics_db_object for %s' % base_name)
if metric_ionosphere_enabled is not None:
training_metric = False
else:
training_metric = True
if metric_ionosphere_enabled == 1:
training_metric = False
if metric_ionosphere_enabled == 0:
training_metric = True
else:
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
logger.error('error :: could not determine metric id from memcache or metrics tables for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if the namespace has matched multiple times in the
# last 10 minutes. However, determining which Skyline related metrics
# are feeding back is quite difficult to ascertain, so reuse the
# ionosphere_busy logic to find the Skyline host namespace and, if busy,
# do not analyse the Skyline host namespace while Ionosphere is busy.
feedback_metric = False
if ionosphere_busy:
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
break
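# NOTE: the above treats a metric as feedback if to_skip is either a
# plain substring of base_name or if every dotted element of to_skip is
# present in the base_name elements. For example (assumed namespaces):
# with to_skip 'skyline.ionosphere' and base_name
# 'ionosphere.skyline.host-1.checks' the substring test fails, but the
# element sets intersect on both 'skyline' and 'ionosphere', so the
# metric is still treated as a feedback metric.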
if feedback_metric:
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
logger.info('feedback metric identified adding Redis key with 600 TTL - %s' % cache_key)
try:
self.redis_conn.setex(cache_key, 600, int(time()))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s key to Redis' % (
str(cache_key)))
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is a match.
metric_max_generations = None
if added_by == 'ionosphere_learn':
try:
metric_max_generations = int(metrics_db_object['max_generations'])
logger.info('determining max_generations for ionosphere_learn check - %s - %s' % (str(metric_max_generations), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: ionosphere_learn check could not determine the metric max_generations from the metrics_db_object for %s' % base_name)
if not metric_max_generations:
logger.error('error :: ionosphere_learn check cannot continue without max_generations for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis ionosphere.ionosphere_non_smtp_alerter_metrics list is created here to
# replace the self.ionosphere_non_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_non_smtp_alerter_metrics = []
try:
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere.ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only process smtp_alerter_metrics
if training_metric:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name in self.ionosphere_non_smtp_alerter_metrics:
if base_name in ionosphere_non_smtp_alerter_metrics:
logger.error('error :: Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s' % (base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.training_metrics.append(base_name)
redis_set = 'ionosphere.training_metrics'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
logger.info(
'ionosphere_enabled is %s for metric id %s - %s' % (
str(metric_ionosphere_enabled), str(metrics_id),
base_name))
if training_metric:
logger.info('Ionosphere is not enabled on %s' % (base_name))
else:
logger.info('Ionosphere is enabled on %s' % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace('.', '/')
# @modified 20170115 - Feature #1854: Ionosphere learn
# Allowing the bifurcation of the metric_training_data_dir based on
# whether added_by is ionosphere_learn or not. This allows Ionosphere to
# be brought online to start evaluating the learn features profiles at
# 30 days, or whatever the learn_full_duration_days is for the metric
# that is being automatically learnt, and to use these fuller duration
# features to determine if a new training data set has been created for
# an ionosphere_enabled metric. Here Ionosphere starts to try and get
# clever, let us hope not too clever, but this is where the
# max_percent_diff_from_origin and max_generations come in. So ...
# here we go, a really "Crazy feedback loop" @astanway :) I would say
# that this is going to be way more useful than the last referenced one
# in https://github.com/etsy/skyline/pull/90#r13592782 ;) This is it
# 20170115202500 UTC Ionosphere really is now really going to begin.
# Here we go! Learn!
# metric_training_data_dir = '%s/%s/%s' % (
# settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
# metric_timeseries_dir)
if added_by != 'ionosphere_learn':
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
metric_timeseries_dir)
else:
# Here we go! Learn you bugger! SUCH A BIG THANKS TO tsfresh!
# And flowjob and The White Stripes, @matzhouse, her and the Dude.
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_LEARN_FOLDER, metric_timestamp,
metric_timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info('training data ts json available - %s' % (anomaly_json))
else:
logger.error('error :: training data ts json was not found - %s' % (anomaly_json))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info('training metric - %s' % (base_name))
redis_anomaly_json = False
if added_by == 'mirage':
logger.info('checking training data Redis json is available')
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visibile" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = '%s/%s.mirage.redis.%sh.json' % (metric_training_data_dir, base_name, full_duration_hours)
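# For example, with a settings.FULL_DURATION of 86400 seconds
# full_duration_hours is '24' and the expected file is
# <metric_training_data_dir>/<base_name>.mirage.redis.24h.json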
if os.path.isfile(redis_anomaly_json):
logger.info('training data Redis full duration ts json available - %s' % (redis_anomaly_json))
else:
logger.info('no training data Redis full duration json was found - %s' % (redis_anomaly_json))
except:
logger.error(traceback.format_exc())
logger.error('error :: training data Redis full duration json was not found - %s' % (redis_anomaly_json))
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
# patterns work and the database layout is defined, we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people's input first here in many ways, which is
# exactly how it was supposed to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20170101 - Feature #1836: ionosphere - local features profiles disk cache
# Cache fp ids for 300 seconds?
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
logger.info('getting MySQL engine')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to get fp_ids')
if not engine:
logger.error('error :: engine not obtained to get fp_ids')
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_table meta for %s' % base_name)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids so that we can handle multiple durations and not
# error and reminds me of the needed metrics by FULL_DURATION
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
all_fp_ids = []
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
# the generations values available in it.
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = None
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly.
# Set result to None here to fix an interpolation error below
result = None
fp_layers_ids = []
fp_layers_present = False
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# After the features profile evaluations this fps_db_object will
# be used to determine what settings.FULL_DURATION features
# profiles need to be created for ionosphere_echo
fps_db_object = None
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# Set both fp_count_with_echo and fp_count to 0 initially so that
# if there are echo fps, then the database can be updated with the
# fp_count_with_echo value for fp_count in the ionosphere_matched
# table
fp_count = 0
fp_count_with_echo = 0
try:
connection = engine.connect()
# @modified 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# Order by the latest features profile, this also results in the
# layers ids being ordered by latest too.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id)
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.id))
result = connection.execute(stmt)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# To be used for ionosphere_echo
fps_db_object = [{column: value for column, value in rowproxy.items()} for rowproxy in result]
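# NOTE: each SQLAlchemy RowProxy is converted to a plain dict here so the
# rows can be iterated over more than once (a raw result proxy can only
# be consumed once), which is what allows fps_db_object to be reused for
# the ionosphere_echo fp selection further down.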
# for row in result:
for row in fps_db_object:
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
fp_id = row['id']
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly
fp_layers_id = int(row['layers_id'])
if fp_layers_id > 0:
fp_layers_present = True
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add the fp_layers_id if > 0
# fp_layers_ids.append(fp_layers_id)
if fp_layers_id > 0:
if fp_layers_id not in fp_layers_ids:
fp_layers_ids.append(fp_layers_id)
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids
all_fp_ids.append(int(fp_id))
if int(row['full_duration']) == int(full_duration):
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Handle ionosphere_learn
if added_by != 'ionosphere_learn':
fp_ids.append(int(fp_id))
logger.info('using fp id %s matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
else:
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it were to match. Ionosphere learn is
# limited here on generation.
# Set the default as max e.g. not allowed
current_fp_generation = int(metric_max_generations)
try:
current_fp_generation = row['generation']
if int(current_fp_generation) < int(metric_max_generations):
fp_ids.append(int(fp_id))
logger.info(
'valid ionosphere_learn generation %s - fp id %s matched full_duration %s - %s' % (
str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
else:
logger.info(
'ionosphere_learn cannot check due to max_generations of %s would be exceeded, current generation %s - fp id %s matched full_duration %s - %s' % (
str(metric_max_generations), str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: ionosphere_learn check could not determine the fp generation of fp id %s from the row object for %s' % (
str(fp_id), base_name))
else:
logger.info('not using fp id %s not matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available throughout
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = row
connection.close()
fp_count = len(fp_ids)
logger.info('determined %s fp ids for %s' % (str(fp_count), base_name))
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = len(fp_layers_ids)
logger.info('determined %s layers ids for %s' % (str(fp_layers_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine fp ids from DB for %s' % base_name)
fp_count = 0
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = 0
# @added 20170306 - Feature #1960: ionosphere_layers
# Corrected the interpolation of the fp_ids_db_object above where it
# was set to the last row only, however it was not used anyway.
# Here we go, let us TEACH you properly. We only evaluate
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# if result:
# fp_ids_db_object = result
if len(fp_ids) == 0:
logger.info('there are no fp ids that match full duration for %s' % base_name)
else:
fp_ids_found = True
if not fp_ids_found:
logger.info('no fp ids were found for %s at %s' % (base_name, str(full_duration)))
# @modified 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Use all_fp_ids so that we can handle multiple durations
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if len(all_fp_ids) == 0:
logger.error('error :: Ionosphere is enabled on %s but has no feature_profiles' % (base_name))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
else:
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161221 - TODO: why not calculate the features of every
# anomaly so that the user does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = '%s/%s.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# ionosphere_learn should always provide the features profile csv
# Ionosphere does not create features profiles for learn, it only
# checks them.
# Here we go! Learn!
if added_by == 'ionosphere_learn':
if not calculated_feature_file_found:
logger.error('error :: no ionosphere_learn calculated_feature_file file found - %s' % calculated_feature_file)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
if training_metric:
# Allow time for the Graphite alert resources to be created. If they are
# not present, an alert was not sent and therefore features do not need
# to be calculated.
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len([f for f in os.listdir(metric_training_data_dir)
if f.endswith('.png') and
os.path.isfile(os.path.join(metric_training_data_dir, f))])
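# NOTE: the presence of any .png files in the training data dir is used
# as the indicator that the alerter created the Graphite graph resources
# for this anomaly; if none exist after the short wait, no alert was sent
# and the features calculation is skipped.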
if graphite_file_count == 0:
logger.info('not calculating features no anomaly Graphite alert resources created in %s' % (metric_training_data_dir))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('anomaly Graphite alert resources found in %s' % (metric_training_data_dir))
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
if f_calc:
send_metric_name = '%s.features_calculation_time' % skyline_app_graphite_namespace
f_calc_time = '%.2f' % float(f_calc)
try:
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send calculate features')
if training_metric:
logger.info('training metric done')
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
logger.error('error :: calculated features file not available - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
calculated_features = get_calculated_features(calculated_feature_file)
if len(calculated_features) == 0:
logger.error('error :: no calculated features were determined from - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('%s calculated features determined' % (str(len(calculated_features))))
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked = 0
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = 0
layers_checked_count = 0
# @added 20190314 - Feature #2484: FULL_DURATION feature profiles
# Here we add the bifurcation to also create a features
# profile at FULL_DURATION for all Mirage metrics, with a
# view to increasing the number of matches trained metrics
# achieve by also allowing for the creation and comparison of
# FULL_DURATION features profiles as well.
echo_check = False
echo_calculated_feature_file = False
echo_calculated_feature_file_found = False
echo_calculated_features = []
echo_fp_ids = []
echo_anomalous_timeseries = None
if added_by == 'mirage':
try:
echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
echo_enabled = False
if echo_enabled:
echo_check = True
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 4 metric check files, alternate between normal
# Ionosphere Mirage features profile checks and Ionosphere echo features
# profile checks.
if echo_check:
if ionosphere_busy:
# Check the ionosphere_echo metric Redis keys to see which check
# to run, ionosphere or ionosphere_echo. If Ionosphere is busy,
# Ionosphere will alternate between normal Ionosphere features
# profiles (Mirage duration) and Ionosphere echo features
# profiles (FULL_DURATION) comparison.
echo_ionosphere_check_cache_key = 'ionosphere_echo.ionosphere.check.%s' % base_name
echo_ionosphere_check_key = False
try:
echo_ionosphere_check_key = self.redis_conn.get(echo_ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
echo_ionosphere_echo_check_cache_key = 'ionosphere_echo.echo.check.%s' % base_name
echo_ionosphere_echo_check_key = False
try:
echo_ionosphere_echo_check_key = self.redis_conn.get(echo_ionosphere_echo_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
create_ionosphere_echo_check_key = False
remove_ionosphere_echo_check_key = False
# If neither the ionosphere nor the ionosphere_echo key exists,
# only check ionosphere
if not echo_ionosphere_check_key:
if not echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
# If the ionosphere_echo key exists only check ionosphere
if echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
# If the ionosphere key exists only check ionosphere_echo
if echo_ionosphere_check_key:
echo_check = True
logger.info('ionosphere_busy - skipping the normal Mirage feature profiles checks as run last time and running ionosphere_echo checks this time')
# Remove the Mirage features profiles from the fp_ids list
fp_ids = []
logger.info('ionosphere_busy - removed %s Mirage feature profile ids from fp_ids' % str(fp_count))
create_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
if remove_ionosphere_echo_check_key:
try:
self.redis_conn.delete(remove_ionosphere_echo_check_key)
logger.info(
'deleted Redis check key - %s' % (remove_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to delete Redis check key - %s' % (remove_ionosphere_echo_check_key))
if create_ionosphere_echo_check_key:
try:
key_created_at = int(time())
self.redis_conn.setex(
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# create_ionosphere_echo_check_key, 300, [key_created_at])
create_ionosphere_echo_check_key, 300, key_created_at)
logger.info(
'created Redis check key - %s' % (create_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to create Redis check key - %s' % (create_ionosphere_echo_check_key))
if echo_check:
try:
if fps_db_object:
for row in fps_db_object:
if int(row['full_duration']) == int(settings.FULL_DURATION):
fp_ids.append(int(row['id']))
echo_fp_ids.append(int(row['id']))
logger.info('appending ionosphere_echo fp id %s matched full_duration of %s - %s' % (str(row['id']), str(settings.FULL_DURATION), base_name))
fp_count_with_echo = len(fp_ids)
echo_fp_count = len(echo_fp_ids)
if echo_fp_count == 0:
echo_check = False
if echo_fp_count > 0:
logger.info('added an additional %s echo fp ids for %s' % (str(echo_fp_count), base_name))
logger.info('determined a total of %s fp ids (incl. echo) for %s' % (str(fp_count_with_echo), base_name))
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
else:
use_context = 'ionosphere_echo_check'
f_calc = None
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, use_context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
echo_calculated_features = []
if echo_calculated_feature_file_found:
try:
echo_calculated_features = get_calculated_features(echo_calculated_feature_file)
except:
# 20190412 - just for debug
logger.error(traceback.format_exc())
logger.error('error :: ionosphere_echo_check no echo_calculated_features were determined')
echo_calculated_features = False
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to process echo')
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If the Ionosphere features profile checks are approaching the
# ionosphere_max_runtime, skip the remaining checks.
time_now_check = int(time())
# Allow 5 seconds for layers checks to be done
max_runtime_tolerance = ionosphere_max_runtime - 5
running_for = time_now_check - check_process_start
if running_for >= max_runtime_tolerance:
logger.info('features profile checks have been running for %s seconds, the ionosphere_max_runtime is about to be breached, skipping remaining features profile checks' % str(running_for))
break
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
check_type = 'ionosphere'
if echo_check:
for echo_fp_id in echo_fp_ids:
if fp_id == echo_fp_id:
check_type = 'ionosphere_echo_check'
if check_type == 'ionosphere_echo_check':
if not echo_calculated_features:
continue
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked += 1
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager().list to reduce memory
# self.features_profiles_checked.append(fp_id)
redis_set = 'ionosphere.features_profiles_checked'
data = str(fp_id)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = 'z_fp_%s' % str(metrics_id)
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for feature_id and values from %s' % metric_fp_table)
if not engine:
logger.error('error :: engine not obtained for feature_id and values from %s' % metric_fp_table)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
# First check to determine if the fp_id has data in memcache
# before querying the database
fp_id_feature_values = None
if settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key)
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
logger.error('error :: failed to get %s from memcache' % fp_id_feature_values_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if fp_id_feature_values:
fp_features = literal_eval(fp_id_feature_values)
logger.info('using memcache %s key data' % fp_id_feature_values_key)
if not fp_features:
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT feature_id, value FROM %s WHERE fp_id=%s' % (metric_fp_table, str(fp_id)) # nosec
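# NOTE: metric_fp_table is derived from the integer metrics_id and fp_id
# is an integer from the ionosphere table, so although this SQL is string
# interpolated it is not user supplied input (hence the nosec above).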
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row['feature_id'])
fp_value = float(row['value'])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info('determined %s features for fp_id %s' % (str(features_count), str(fp_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine feature_id, value from %s' % metric_fp_table)
if fp_features and settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
self.memcache_client.set(fp_id_feature_values_key, fp_features)
logger.info('populated memcache %s key' % fp_id_feature_values_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_feature_values_key)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added the calculated features sum for verification purposes
all_calc_features_sum_list = []
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_calculated_features = calculated_features
if check_type == 'ionosphere_echo_check':
use_calculated_features = echo_calculated_features
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
all_calc_features_sum_list.append(float(calc_value))
all_calc_features_sum = sum(all_calc_features_sum_list)
# Convert feature names in calculated_features to their id
logger.info('converting tsfresh feature names to Skyline feature ids')
calc_features_by_id = []
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(calc_value)])
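# NOTE: TSFRESH_FEATURES is assumed to be a list of
# [skyline_feature_id, tsfresh_feature_name] pairs, so this loop simply
# translates each calculated tsfresh feature name into its Skyline
# feature id, allowing the id based comparison against the
# z_fp_<metric_id> rows below.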
# Determine what features each data has, extract only values for
# common features.
logger.info('determining common features')
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, calc_value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(calc_value)
# Determine the sum of each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error('error :: mismatch in number of common features')
logger.error('error :: relevant_fp_feature_values_count - %s' % str(relevant_fp_feature_values_count))
logger.error('error :: relevant_calc_feature_values_count - %s' % str(relevant_calc_feature_values_count))
continue
else:
logger.info('comparing on %s common features' % str(relevant_fp_feature_values_count))
if relevant_fp_feature_values_count == 0:
logger.error('error :: relevant_fp_feature_values_count is zero')
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
'sum of the values of the %s common features in features profile - %s' % (
str(relevant_fp_feature_values_count), str(sum_fp_values)))
logger.info(
'sum of the values of the %s common features in the calculated features - %s' % (
str(relevant_calc_feature_values_count), str(sum_calc_values)))
# Determine whether each set is positive or negative
# # if the same, carry on
# # if both negative, make them both positive
# Sum fp values, Sum calculated - handle negatives like features_sum :: -3389570699080000.0000000000
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
percent_different = 100
sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
try:
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
percent_different = calc_percent_different[0]
logger.info('percent_different between common features sums - %s' % str(percent_different))
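# NOTE: sums_array is [sum_fp_values, sum_calc_values], so np.diff gives
# [sum_calc_values - sum_fp_values] and percent_different evaluates to
# (sum_calc_values - sum_fp_values) / sum_fp_values * 100. For example
# (illustrative values only): sum_fp_values 100.0 and sum_calc_values
# 101.0 gives a percent_different of 1.0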
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate percent_different')
continue
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
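# NOTE: numpy.testing.assert_array_almost_equal compares to 6 decimal
# places by default and raises AssertionError on mismatch, so
# almost_equal is only True when the two sums agree very closely.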
if almost_equal:
not_anomalous = True
# @modified 20170118 - Bug #1860: Debug learn not matched in ionosphere
# This broke it, no variable was interpolated
# logger.info('common features sums are almost equal, not anomalous' % str(relevant_fp_feature_values_count))
logger.info('common features sums are almost equal, not anomalous')
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info('updating checked details in db for %s' % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update checked details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update checked details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp))
connection.close()
logger.info('updated checked_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update checked_count and last_checked for %s ' % str(fp_id))
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
use_percent_similar = float(settings.IONOSPHERE_ECHO_FEATURES_PERCENT_SIMILAR)
except:
use_percent_similar = 2.0
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < use_percent_similar:
not_anomalous = True
# log
logger.info('not anomalous - features profile match - %s' % base_name)
logger.info(
'calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check - not anomalous with fp id %s for %s' % (str(fp_id), base_name))
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
# Now if not matched use Min-Max scaling as per
# http://sebastianraschka.com/Articles/2014_about_feature_scaling.html#numpy
# Min-Max scale the fp time series z_ts_<metric_id> SELECT WHERE fp_id
# or from memcache to create minmax_fp_ts
# Min-Max scale the current time series to create minmax_anomalous_ts
# Create features profiles for minmax_fp_ts
# Create features profiles for minmax_anomalous_ts
try:
minmax_scaling_enabled = settings.IONOSPHERE_MINMAX_SCALING_ENABLED
except:
minmax_scaling_enabled = False
minmax_not_anomalous = False
minmax_check = False
minmax = 0
if not not_anomalous:
if minmax_scaling_enabled:
minmax_check = True
if added_by == 'ionosphere_learn' and minmax_check:
minmax_check = False
logger.info('ionosphere_learn job not minmax scaling')
if minmax_check:
logger.info('running minmax scaling')
# First check to determine if the z_ts_<metric_id> for the fp
# has data in memcache before querying the database
metric_fp_ts_table = 'z_ts_%s' % str(metrics_id)
fp_id_metric_ts = []
if settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
logger.error('error :: failed to get %s from memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
if fp_id_metric_ts_object:
fp_id_metric_ts = literal_eval(fp_id_metric_ts_object)
logger.info('used memcache %s key data to populate fp_id_metric_ts with %s data points' % (fp_id_metric_ts_key, str(len(fp_id_metric_ts))))
else:
logger.info('no memcache %s key data, will use database' % fp_id_metric_ts_key)
if not fp_id_metric_ts:
if LOCAL_DEBUG:
logger.debug('debug :: getting data from %s database table for fp id %s to populate the fp_id_metric_ts list' % (metric_fp_ts_table, str(fp_id)))
try:
stmt = 'SELECT timestamp, value FROM %s WHERE fp_id=%s' % (metric_fp_ts_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_id_ts_timestamp = int(row['timestamp'])
fp_id_ts_value = float(row['value'])
fp_id_metric_ts.append([fp_id_ts_timestamp, fp_id_ts_value])
connection.close()
values_count = len(fp_id_metric_ts)
logger.info('determined %s values for the fp_id time series %s for %s' % (str(values_count), str(fp_id), str(base_name)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timestamps and values from %s' % metric_fp_ts_table)
if fp_id_metric_ts and settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
self.memcache_client.set(fp_id_metric_ts_key, fp_id_metric_ts)
logger.info('populated memcache %s key' % fp_id_metric_ts_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
# Get anomalous time series
anomalous_ts_values_count = 0
if fp_id_metric_ts:
anomalous_timeseries_not_defined = True
try:
test_anomalous_timeseries = anomalous_timeseries
if len(test_anomalous_timeseries) > 0:
anomalous_timeseries_not_defined = False
except:
logger.info('anomalous_timeseries is not defined, loading from anomaly json')
timeseries_dir = base_name.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_data_dir, base_name)
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere_echo_check':
anomaly_json = redis_anomaly_json
if not echo_anomalous_timeseries:
try:
with open((redis_anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
echo_anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(echo_anomalous_timeseries) > 0:
logger.info('echo_anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (redis_anomaly_json, str(len(echo_anomalous_timeseries))))
else:
logger.error('error :: echo_anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % redis_anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create echo_anomalous_timeseries from anomaly json %s' % redis_anomaly_json)
else:
logger.info('echo_anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(echo_anomalous_timeseries))))
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if anomalous_timeseries_not_defined:
if anomalous_timeseries_not_defined and check_type == 'ionosphere':
try:
with open((anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(anomalous_timeseries) > 0:
logger.info('anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (anomaly_json, str(len(anomalous_timeseries))))
else:
logger.error('error :: anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create anomalous_timeseries from anomaly json %s' % anomaly_json)
else:
if check_type == 'ionosphere':
logger.info('anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(anomalous_timeseries))))
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_anomalous_timeseries = anomalous_timeseries
if check_type == 'ionosphere_echo_check':
use_anomalous_timeseries = echo_anomalous_timeseries
anomalous_ts_values_count = len(use_anomalous_timeseries)
# @added 20180621 - Feature #2404: Ionosphere - fluid approximation
# Check ranges and only Min-Max scale if the 2 time series
# are similar in range
# @added 20180819 - Bug #2534: Ionosphere - fluid approximation - IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE on low ranges
# TODO
try:
range_tolerance = settings.IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE
except:
range_tolerance = 0.15
range_tolerance_percentage = range_tolerance * 100
check_range = False
range_similar = False
if fp_id_metric_ts:
if anomalous_ts_values_count > 0:
check_range = True
lower_range_similar = False
upper_range_similar = False
if check_range:
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
min_fp_value = min(minmax_fp_values)
max_fp_value = max(minmax_fp_values)
except:
min_fp_value = False
max_fp_value = False
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
min_anomalous_value = min(minmax_anomalous_values)
max_anomalous_value = max(minmax_anomalous_values)
except:
min_anomalous_value = False
max_anomalous_value = False
lower_range_not_same = True
try:
try:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_not_same = False
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
except:
lower_range_not_same = True
if min_fp_value and min_anomalous_value and lower_range_not_same:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
else:
lower_min_fp_value = int(min_fp_value - (min_fp_value * range_tolerance))
upper_min_fp_value = int(min_fp_value + (min_fp_value * range_tolerance))
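# As an illustrative example, a min_fp_value of 100 with a
# range_tolerance of 0.15 gives a band of int(85) to int(115), and
# because the membership test below uses range() the upper bound
# itself is excluded from the band.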
if int(min_anomalous_value) in range(lower_min_fp_value, upper_min_fp_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(min_fp_value),
str(min_anomalous_value),
str(range_tolerance_percentage)))
if not lower_range_similar:
logger.info('lower range of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(min_fp_value), str(min_anomalous_value)))
upper_range_not_same = True
try:
if int(max_fp_value) == int(max_anomalous_value):
upper_range_not_same = False
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(max_fp_value), str(max_anomalous_value)))
except:
upper_range_not_same = True
if max_fp_value and max_anomalous_value and lower_range_similar and upper_range_not_same:
# @added 20180717 - Task #2446: Optimize Ionosphere
# Feature #2404: Ionosphere - fluid approximation
# On low values such as 1 and 2, the range_tolerance
# should be adjusted to account for the very small
# range. TODO
lower_max_fp_value = int(max_fp_value - (max_fp_value * range_tolerance))
upper_max_fp_value = int(max_fp_value + (max_fp_value * range_tolerance))
if int(max_anomalous_value) in range(lower_max_fp_value, upper_max_fp_value):
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(max_fp_value), str(max_anomalous_value),
str(range_tolerance_percentage)))
else:
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(max_fp_value), str(max_anomalous_value)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not calculate range similarity with the current anomalous_timeseries and the fp id %s time series' % (str(fp_id)))
if lower_range_similar and upper_range_similar:
range_similar = True
else:
logger.info('the ranges of fp_id_metric_ts and anomalous_timeseries differ significantly, Min-Max scaling will be skipped')
minmax_fp_ts = []
# if fp_id_metric_ts:
if range_similar:
if LOCAL_DEBUG:
logger.debug('debug :: creating minmax_fp_ts from minmax scaled fp_id_metric_ts')
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
x_np = np.asarray(minmax_fp_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
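# Min-Max scaling linearly rescales the values into the 0 to 1 range,
# e.g. values of 10, 20 and 30 become 0.0, 0.5 and 1.0, so the two
# time series are compared on shape rather than absolute magnitude.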
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_fp_ts.append([ts[0], v])
logger.info('minmax_fp_ts list populated with the minmax scaled time series with %s data points' % str(len(minmax_fp_ts)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not minmax scale fp id %s time series for %s' % (str(fp_id), str(base_name)))
if not minmax_fp_ts:
logger.error('error :: minmax_fp_ts list not populated')
minmax_anomalous_ts = []
if minmax_fp_ts:
# Only process if they are approximately the same length
minmax_fp_ts_values_count = len(minmax_fp_ts)
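# Only proceed when the two time series are within roughly 14 data
# points of each other in length, noting that range(-14, 14) is
# asymmetric, a difference of -14 is accepted while +14 is not.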
if minmax_fp_ts_values_count - anomalous_ts_values_count in range(-14, 14):
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
x_np = np.asarray(minmax_anomalous_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_anomalous_ts.append([ts[0], v])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine np_minmax with current time series anomalous_timeseries and fp id %s time series' % (str(fp_id)))
if len(minmax_anomalous_ts) > 0:
logger.info('minmax_anomalous_ts is populated with %s data points' % str(len(minmax_anomalous_ts)))
else:
logger.error('error :: minmax_anomalous_ts is not populated')
else:
logger.info('minmax scaled check will be skipped - anomalous_ts_values_count is %s and minmax_fp_ts_values_count is %s' % (str(anomalous_ts_values_count), str(minmax_fp_ts_values_count)))
minmax_fp_ts_csv = '%s/fpid.%s.%s.minmax_fp_ts.tsfresh.input.std.csv' % (
settings.SKYLINE_TMP_DIR, str(fp_id), base_name)
minmax_fp_fname_out = minmax_fp_ts_csv + '.transposed.csv'
anomalous_ts_csv = '%s/%s.%s.minmax_anomalous_ts.tsfresh.std.csv' % (
settings.SKYLINE_TMP_DIR, metric_timestamp, base_name)
anomalous_fp_fname_out = anomalous_ts_csv + '.transposed.csv'
tsf_settings = ReasonableFeatureExtractionSettings()
tsf_settings.disable_progressbar = True
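# The ReasonableFeatureExtractionSettings object with the progress
# bar disabled is passed to tsfresh extract_features below, the exact
# settings class and keyword argument depend on the tsfresh version
# in use.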
minmax_fp_features_sum = None
minmax_anomalous_features_sum = None
if minmax_anomalous_ts and minmax_fp_ts:
if LOCAL_DEBUG:
logger.debug('debug :: analyzing minmax_fp_ts and minmax_anomalous_ts')
if not os.path.isfile(minmax_fp_ts_csv):
if LOCAL_DEBUG:
logger.debug('debug :: creating %s from minmax_fp_ts' % minmax_fp_ts_csv)
datapoints = minmax_fp_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
except: # nosec
continue
del datapoints
if LOCAL_DEBUG:
if len(converted) > 0:
logger.debug('debug :: converted is populated')
else:
logger.debug('debug :: error :: converted is not populated')
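# Each data point is written out as a metric,timestamp,value line,
# the three column layout that the data frame and the tsfresh
# extract_features call (column_id, column_sort, column_value) below
# expect.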
for ts, value in converted:
try:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(minmax_fp_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not write to file %s' % (str(minmax_fp_ts_csv)))
del converted
else:
logger.info('file found %s, using for data' % minmax_fp_ts_csv)
if not os.path.isfile(minmax_fp_ts_csv):
logger.error('error :: file not found %s' % minmax_fp_ts_csv)
else:
logger.info('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)
try:
df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create data frame from %s' % (str(minmax_fp_ts_csv)))
try:
df_features = extract_features(
df, column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, feature_extraction_settings=tsf_settings)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create df_features from %s' % (str(minmax_fp_ts_csv)))
del df
# Create transposed features csv
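# extract_features returns a single row per metric with one column
# per feature, transposing it yields one feature_name,value row per
# feature which is what the feature sum calculations below read back
# from the csv.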
if not os.path.isfile(minmax_fp_fname_out):
# Transpose
df_t = df_features.transpose()
df_t.to_csv(minmax_fp_fname_out)
del df_t
else:
if LOCAL_DEBUG:
logger.debug('debug :: file exists - %s' % minmax_fp_fname_out)
try:
# Calculate the count and sum of the features values
df_sum = pd.read_csv(
minmax_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum.columns = ['feature_name', 'value']
df_sum['feature_name'] = df_sum['feature_name'].astype(str)
df_sum['value'] = df_sum['value'].astype(float)
minmax_fp_features_count = len(df_sum['value'])
minmax_fp_features_sum = df_sum['value'].sum()
logger.info('minmax_fp_ts - features_count: %s, features_sum: %s' % (str(minmax_fp_features_count), str(minmax_fp_features_sum)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create df_sum from %s' % (str(minmax_fp_fname_out)))
if minmax_fp_features_count > 0:
if LOCAL_DEBUG:
logger.debug('debug :: minmax_fp_features_count of the minmax_fp_ts is %s' % str(minmax_fp_features_count))
else:
logger.error('error :: minmax_fp_features_count is %s' % str(minmax_fp_features_count))
if not os.path.isfile(anomalous_ts_csv):
datapoints = minmax_anomalous_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
except: # nosec
continue
del datapoints
for ts, value in converted:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(anomalous_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
del converted
df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
df, column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, feature_extraction_settings=tsf_settings)
del df
# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
# Transpose
df_t = df_features_current.transpose()
df_t.to_csv(anomalous_fp_fname_out)
del df_t
# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
anomalous_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
df_sum_2['feature_name'] = df_sum_2['feature_name'].astype(str)
df_sum_2['value'] = df_sum_2['value'].astype(float)
minmax_anomalous_features_count = len(df_sum_2['value'])
minmax_anomalous_features_sum = df_sum_2['value'].sum()
logger.info('minmax_anomalous_ts - minmax_anomalous_features_count: %s, minmax_anomalous_features_sum: %s' % (
str(minmax_anomalous_features_count),
str(minmax_anomalous_features_sum)))
if minmax_fp_features_sum and minmax_anomalous_features_sum:
percent_different = None
try:
fp_sum_array = [minmax_fp_features_sum]
calc_sum_array = [minmax_anomalous_features_sum]
percent_different = 100
sums_array = np.array([minmax_fp_features_sum, minmax_anomalous_features_sum], dtype=float)
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
percent_different = calc_percent_different[0]
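# As a worked example, feature sums of 100.0 and 103.0 give
# np.diff([100.0, 103.0]) / 100.0 * 100.0 = 3.0 percent different.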
logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate percent_different from minmax scaled features sums')
if percent_different:
almost_equal = None
try:
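# numpy assert_array_almost_equal raises an AssertionError if the two
# sums differ beyond its default precision of 6 decimal places, so
# almost_equal is derived from whether or not the call raises.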
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
minmax_not_anomalous = True
logger.info('minmax scaled common features sums are almost equal, not anomalous')
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
mm_use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
mm_use_percent_similar = float(settings.IONOSPHERE_ECHO_MINMAX_SCALING_FEATURES_PERCENT_SIMILAR)
except:
mm_use_percent_similar = 3.5
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < mm_use_percent_similar:
minmax_not_anomalous = True
# log
logger.info('not anomalous - minmax scaled features profile match - %s - %s' % (base_name, str(minmax_not_anomalous)))
logger.info(
'minmax scaled calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(mm_use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check :: not anomalous - minmax scaled features profile match - %s' % (base_name))
if minmax_not_anomalous:
not_anomalous = True
minmax = 1
# Clean up the Min-Max scaled time series resources that were
# created for the comparison
try:
if os.path.isfile(minmax_fp_ts_csv):
self.remove_metric_check_file(str(minmax_fp_ts_csv))
except:
pass
try:
if os.path.isfile(minmax_fp_fname_out):
self.remove_metric_check_file(str(minmax_fp_fname_out))
except:
pass
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Clean up echo files
if echo_check:
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
try:
if os.path.isfile(echo_calculated_feature_file):
self.remove_metric_check_file(str(echo_calculated_feature_file))
except:
pass
echo_features_file = '%s/%s.%s.echo.fp.details.txt' % (metric_training_data_dir, str(metric_timestamp), base_name)
try:
if os.path.isfile(echo_features_file):
self.remove_metric_check_file(str(echo_features_file))
except:
pass
# Clean up
if minmax_check:
try:
clean_file = anomalous_ts_csv
if os.path.isfile(anomalous_ts_csv):
self.remove_metric_check_file(str(anomalous_ts_csv))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_ts_csv file to clean up')
try:
clean_file = anomalous_fp_fname_out
if os.path.isfile(anomalous_fp_fname_out):
self.remove_metric_check_file(str(anomalous_fp_fname_out))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_fp_fname_out file to clean up')
# END - Feature #2404: Ionosphere - fluid approximation
if not_anomalous:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous.append(base_name)
redis_set = 'ionosphere.not_anomalous'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# update matched_count in ionosphere_table
matched_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
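# SQLAlchemy core update, increments matched_count in place on the
# ionosphere table row for this fp_id and sets last_matched to the
# current timestamp.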
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp))
connection.close()
logger.info('updated matched_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update matched_count and last_matched for %s ' % str(fp_id))
# @added 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched update
# @modified 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update ionosphere_matched for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update ionosphere_matched for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
if minmax_not_anomalous == 1:
minmax_fp_features_sum = float(minmax_fp_features_sum)
minmax_fp_features_count = int(minmax_fp_features_count)
minmax_anomalous_features_sum = float(minmax_anomalous_features_sum)
minmax_anomalous_features_count = int(minmax_anomalous_features_count)
else:
minmax_fp_features_sum = 0
minmax_fp_features_count = 0
minmax_anomalous_features_sum = 0
minmax_anomalous_features_count = 0
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# If there are additional echo fps then the database can be
# updated with the fp_count_with_echo value for fp_count in
# the ionosphere_matched table
if fp_count_with_echo > fp_count:
fp_count = fp_count_with_echo
try:
connection = engine.connect()
# @modified 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added all_calc_features_sum, all_calc_features_count,
# sum_calc_values, common_features_count, tsfresh_version
ins = ionosphere_matched_table.insert().values(
fp_id=int(fp_id),
metric_timestamp=int(metric_timestamp),
all_calc_features_sum=float(all_calc_features_sum),
all_calc_features_count=len(all_calc_features_sum_list),
sum_common_values=float(sum_calc_values),
common_features_count=int(relevant_calc_feature_values_count),
tsfresh_version=str(tsfresh_version),
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
minmax=minmax,
minmax_fp_features_sum=minmax_fp_features_sum,
minmax_fp_features_count=minmax_fp_features_count,
minmax_anomalous_features_sum=minmax_anomalous_features_sum,
minmax_anomalous_features_count=minmax_anomalous_features_count,
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_count=fp_count, fp_checked=fp_checked)
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax
if minmax == 0:
logger.info('new ionosphere_matched id: %s' % str(new_matched_id))
else:
logger.info('new minmax scaled ionosphere_matched id: %s' % str(new_matched_id))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not update ionosphere_matched for %s with timestamp %s' % (
str(fp_id), str(metric_timestamp)))
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too as it has proved useful
# in the frontend with regards to training data sets being
# matched by layers and can do the same in the frontend
# training data for feature profile matches too.
if not_anomalous:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(fp_id))
logger.info('added matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Stop on the first match
break
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info('debug :: %s is a features profile for %s' % (str(fp_id), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# If this is an ionosphere_learn check them we handle it before
# the others and exit and ionosphere_learn uses the Redis work
# queue. Here we go! Learn!
if added_by == 'ionosphere_learn':
if not_anomalous:
logger.info('an ionosphere_learn metric has been found to be not anomalous before')
# @added 20170607 - Feature #2010: Ionosphere learn - rate limiting profile learning
learning_rate_limited = False
now = int(time())
rate_limit_timestamp = now - 3600
rate_limit_datetime = datetime.fromtimestamp(rate_limit_timestamp)
f = '%Y-%m-%d %H:%M:%S'
after_datetime = rate_limit_datetime.strftime(f)
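# Learning is rate limited if any features profile for this metric
# with a generation greater than 1 was created in the last hour at a
# full_duration equal to or greater than the current full_duration.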
try:
connection = engine.connect()
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
result = connection.execute(
'SELECT * FROM ionosphere WHERE metric_id=%s AND created_timestamp > \'%s\' AND generation > 1' % (str(metrics_id), str(after_datetime))) # nosec
for row in result:
last_full_duration = row['full_duration']
if int(full_duration) <= int(last_full_duration):
learning_rate_limited = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: determining whether learning should be rate limited')
if learning_rate_limited:
logger.info('learning currently dynamically rate limited on %s' % str(base_name))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('learning is not currently rate limited on %s' % str(base_name))
# @added 20170605 - Bug #2038: Ionosphere learn parent generation incorrect
# Determine generation of the matched fp not the last in the
# list
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT generation FROM ionosphere WHERE id=%s' % str(fp_id) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
matched_fp_generation = int(row['generation'])
connection.close()
logger.info(
'determined matched fp_id %s is a generation %s profile' % (
str(fp_id), str(matched_fp_generation)))
current_fp_generation = matched_fp_generation
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine generation from ionosphere table for fp id %s' % str(fp_id))
logger.info(
'ionosphere_learn metric matches the generation %s features profile id %s - %s' % (
str(current_fp_generation), str(fp_id), base_name))
# Added Redis to work_set, learn will then go off and create
# the features profile with the parent training data if
# less than max_generations, although ionosphere_learn
# should not send Ionosphere any work if the result would
# be greater than max_generations
logger.info('adding work item to Redis set ionosphere.learn.work')
ionosphere_job = 'learn_fp_learnt'
work_deadline = 'Soft'
try:
logger.info(
'LEARNT :: adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to create a learnt features profile' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to make a learn features profile later' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly. We only evaluate
# the Ionosphere layer algorithms after Skyline has had an
# opportunity to match the original and learnt features
# profiles. This enables the original, evolutionary,
# generations based learning to be continually evaluated.
# This needs to happen for any future implementation of
# Feature #1888: Ionosphere learn - evolutionary maturity forget
logger.info('layers algorithms check')
check_layers_algorithms = False
if not not_anomalous:
check_layers_algorithms = True
if added_by == 'ionosphere_learn':
check_layers_algorithms = False
logger.info('ionosphere_learn - layers algorithms check - False')
else:
logger.info('layers algorithms check - True, %s layers to be checked' % str(fp_layers_count))
else:
logger.info('a features profile matched as not_anomalous - layers algorithms check - False')
if check_layers_algorithms and fp_layers_present:
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
mirage_full_duration_json_file = '%s/%s.mirage.redis.%sh.json' % (
metric_training_data_dir, base_name,
str(int(full_duration_in_hours)))
if os.path.isfile(mirage_full_duration_json_file):
full_duration_json_file = mirage_full_duration_json_file
else:
full_duration_json_file = '%s/%s.json' % (metric_training_data_dir, base_name)
anomalous_timeseries = None
if os.path.isfile(full_duration_json_file):
logger.info('full duration ts json available for layers check - %s' % (full_duration_json_file))
try:
# Read the timeseries json file
with open((full_duration_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
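# The json file stores the time series data points as tuples, so the
# parentheses are swapped for brackets and literal_eval rebuilds the
# data as a list of [timestamp, value] lists.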
except:
logger.error(traceback.format_exc())
logger.error('error :: could not load json for layers check - %s' % (base_name))
logger.info('data points surfaced for layers check - %s' % (str(len(anomalous_timeseries)) if anomalous_timeseries else '0'))
else:
logger.error('error :: full duration ts json for layers was not found - %s' % (full_duration_json_file))
matched_layers_id = None
for layers_id in fp_layers_ids:
if not not_anomalous:
logger.info('checking layers_id %s - %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not_anomalous:
logger.info('skipping checking layers_id %s - %s layers profiles of %s possible layers as layer id %s already matched' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count), str(matched_layers_id)))
continue
if int(layers_id) != 0:
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked += 1
layers_checked_count += 1
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to Redis set here and commented out the
# self.layers_checked.append in the try below this
redis_set = 'ionosphere.layers_checked'
data = layers_id
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Get the layers algorithms and run then on the timeseries
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to the ionosphere.layers_checked Redis set
# above
# self.layers_checked.append(layers_id)
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked)
not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked_count)
if not_anomalous:
matched_layers_id = layers_id
except:
logger.error(traceback.format_exc())
logger.error('error :: run_layer_algorithms failed for layers_id - %s' % (str(layers_id)))
if not_anomalous:
logger.info('not_anomalous :: layers_id %s was matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
else:
logger.info('still anomalous :: layers_id %s was NOT matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not not_anomalous:
logger.info('anomalous - no features profiles layers were matched - %s' % base_name)
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1854: Ionosphere learn
# Create a layer_id matched txt file in the training_data dir
# to advise the operator if a training_data set has been matched
# by a layer. Further below if app is not ionosphere_learn a
# 'learn_fp_generation' ionosphere_job is added so ionosphere_learn
# can still try and learn from the existing features profiles
# that exist even if a layer matched as not_anomalous.
if not_anomalous:
layers_id_matched_file = '%s/%s.layers_id_matched.layers_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(layers_id_matched_file):
try:
write_data_to_file(skyline_app, layers_id_matched_file, 'w', str(matched_layers_id))
logger.info('added matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
else:
logger.info('no layers algorithm check required')
# Ionosphere layers DONE
if not not_anomalous:
logger.info('anomalous - no feature profiles were matched - %s' % base_name)
# @added 20170116 - Feature #1854: Ionosphere learn
# If this is an ionosphere_learn check an Ionosphere alert will
# not be sent back to Analyzer, Mirage or the ionosphere.learn.work
# Redis set. We exit, work is done.
if added_by == 'ionosphere_learn':
logger.info('ionosphere_learn check complete - %s' % base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(base_name)
redis_set = 'ionosphere.anomalous_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panaroma_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(anomalous_value), str(int(from_timestamp)),
str(int(metric_timestamp)), str(settings.ALGORITHMS),
str(triggered_algorithms), skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panaroma_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
base_name)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, 'w',
panaroma_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panaroma_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to the Redis set function below
# self.sent_to_panorama.append(base_name)
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panaroma_anomaly_file))
logger.info(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
redis_set = 'ionosphere.sent_to_panorama'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
# @modified 20170116 - Feature #1854: Ionosphere learn
# Only do the cache_key if not ionosphere_learn
if added_by != 'ionosphere_learn':
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# added 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# Added cache_key_value
cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
try:
self.redis_conn.setex(
cache_key, 300,
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration])
str(cache_key_value))
logger.info(
'add Redis alert key - %s - %s' %
(cache_key, str(cache_key_value)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
(cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
str(triggered_algorithms), str(full_duration)))
# @added 20170116 - Feature #1854: Ionosphere learn
# Added an ionosphere_learn job for the timeseries that did not
# match any profiles. Here we go! Learn!
if added_by != 'ionosphere_learn':
ionosphere_job = 'learn_fp_generation'
logger.info(
'adding an ionosphere_learn %s job for the timeseries that did not match any profiles - %s' % (
ionosphere_job, base_name))
try:
logger.info(
'adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
logger.info('removing %s' % skyline_app_logwait)
os.remove(skyline_app_logwait)
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
logger.info('SKYLINE_FEEDBACK_NAMESPACES is set to %s' % str(SKYLINE_FEEDBACK_NAMESPACES))
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to Redis')
except:
logger.error('error :: cannot connect to redis at socket path %s' % (
settings.REDIS_SOCKET_PATH))
sleep(30)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
continue
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# purge_old_data_dirs after every check file run, this takes less
# than a second and keeps the purging somewhat consistent with
# input rate.
try:
logger.info('purging any old training data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.format_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
    settings.IONOSPHERE_DATA_FOLDER,
    settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
# @added 20170110 - Feature #1854: Ionosphere learn
# purge_old_data_dirs learn data
if settings.IONOSPHERE_LEARN:
try:
logger.info('purging any old learning data')
self.purge_old_data_dirs(
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs learn - %s' % traceback.format_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
    settings.IONOSPHERE_LEARN_FOLDER,
    settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
# @added 20170916 - Feature #1996: Ionosphere - matches page
# Create the ionosphere_summary_memcache_object
# @modified 20180103 - Feature #1996: Ionosphere - matches page
# The ionosphere_summary_list memcache object is not managed in
# ionosphere.py and was an artefact of some dev work that may
# resume at some point
# if settings.MEMCACHE_ENABLED:
# try:
# logger.info('updating the ionosphere_summary_memcache_object')
# self.update_ionosphere_summary_memcache_object
# except:
# logger.error('error :: update_ionosphere_summary_memcache_object - %s' % traceback.print_exc())
# Populate the database metadata tables
# What is my host id in the Skyline panorama DB?
host_id = False
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Check memcached before MySQL
if settings.MEMCACHE_ENABLED:
hosts_id_key = 'hosts.id.%s' % this_host
try:
host_id = self.memcache_client.get(hosts_id_key)
# if memcache does not have the key the response to the
# client is None, it does not except
except:
logger.error('error :: failed to get %s from memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if host_id:
logger.info('using memcache %s key data' % hosts_id_key)
logger.info('host_id: %s' % str(host_id))
if not host_id:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'select id FROM hosts WHERE host=\'%s\'' % this_host # nosec
results = mysql_select(skyline_app, query)
if results:
host_id = results[0][0]
logger.info('host_id: %s' % str(host_id))
else:
logger.info('failed to determine host id of %s' % this_host)
if host_id and settings.MEMCACHE_ENABLED:
try:
self.memcache_client.set(hosts_id_key, int(host_id))
logger.info('populated memcache %s key' % hosts_id_key)
except:
logger.error('error :: failed to set %s in memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# if not known - INSERT hostname INTO host
if not host_id:
logger.info('inserting %s into hosts table' % this_host)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'insert into hosts (host) VALUES (\'%s\')' % this_host # nosec
host_id = self.mysql_insert(query)
if host_id:
logger.info('new host_id: %s' % str(host_id))
if not host_id:
logger.error(
'error :: failed to populate %s into the hosts table' %
this_host)
sleep(30)
continue
"""
Determine if any metric check files have been added
"""
while True:
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
if not metric_var_files:
logger.info('sleeping 20 no metric check files')
sleep(20)
up_now = time()
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, up_now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
# Manage the ionosphere.unique_metrics Redis set which is queried
# by Analyzer and Mirage, yes and we use multiprocessing
last_update = None
try:
last_update = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics: %s' % e)
if not last_update:
pids = []
now = time()
try:
logger.info('starting manage_ionosphere_unique_metrics process')
p = Process(target=self.manage_ionosphere_unique_metrics)
pids.append(p)
p.start()
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to start manage_ionosphere_unique_metrics')
# Self monitor process and terminate if run for too long
p_starts = time()
while time() - p_starts <= 5:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'manage_ionosphere_unique_metrics completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing manage_ionosphere_unique_metrics process' % (skyline_app))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('%s :: killed manage_ionosphere_unique_metrics process' % (skyline_app))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all manage_ionosphere_unique_metrics processes')
# Discover metric anomalies to insert
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Task #1658: Patterning Skyline Ionosphere
# Send Ionosphere metrics to Graphite every minute now that
# Ionosphere is better tuned and Reset lists
cache_key = '%s.sent_graphite_metrics' % skyline_app
redis_sent_graphite_metrics = False
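# The sent_graphite_metrics key is set with a 59 second TTL further
# below, so this block only flushes the Ionosphere metrics to
# Graphite roughly once per minute.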
try:
redis_sent_graphite_metrics = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for key %s: %s' % (cache_key, e))
# Flush metrics to Graphite
if not redis_sent_graphite_metrics:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# not_anomalous = str(len(self.not_anomalous))
not_anomalous = str(len(list(self.redis_conn.smembers('ionosphere.not_anomalous'))))
except:
not_anomalous = '0'
logger.info('not_anomalous :: %s' % not_anomalous)
send_metric_name = '%s.not_anomalous' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager() list
# total_anomalies = str(len(self.anomalous_metrics))
total_anomalies = str(len(list(self.redis_conn.smembers('ionosphere.anomalous_metrics'))))
except:
total_anomalies = '0'
logger.info('total_anomalies :: %s' % total_anomalies)
send_metric_name = '%s.total_anomalies' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, total_anomalies)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# training_metrics = str(len(self.training_metrics))
training_metrics = str(len(list(self.redis_conn.smembers('ionosphere.training_metrics'))))
except:
training_metrics = '0'
logger.info('training metrics :: %s' % training_metrics)
send_metric_name = '%s.training_metrics' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, training_metrics)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# features_profiles_checked = str(len(self.features_profiles_checked))
features_profiles_checked = str(len(list(self.redis_conn.smembers('ionosphere.features_profiles_checked'))))
except:
features_profiles_checked = '0'
logger.info('fps checked count :: %s' % features_profiles_checked)
send_metric_name = '%s.fps_checked' % skyline_app_graphite_namespace
# @modified 20170306 - Feature #1960: ionosphere_layers
# Corrected namespace
# send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
send_graphite_metric(skyline_app, send_metric_name, features_profiles_checked)
# @added 20170306 - Feature #1960: ionosphere_layers
try:
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = str(len(self.layers_checked))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# str_layers_checked = str(len(self.layers_checked))
str_layers_checked = str(len(list(self.redis_conn.smembers('ionosphere.layers_checked'))))
except:
str_layers_checked = '0'
logger.info('layers checked count :: %s' % str_layers_checked)
send_metric_name = '%s.layers_checked' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str_layers_checked)
if settings.PANORAMA_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_panorama = str(len(self.sent_to_panorama))
sent_to_panorama = str(len(list(self.redis_conn.smembers('ionosphere.sent_to_panorama'))))
except:
sent_to_panorama = '0'
logger.info('sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_panorama)
sent_graphite_metrics_now = int(time())
try:
self.redis_conn.setex(cache_key, 59, sent_graphite_metrics_now)
logger.info('updated Redis key - %s' % cache_key)
except:
logger.error('error :: failed to update Redis key - %s up' % cache_key)
# Reset lists
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.anomalous_metrics[:] = []
# self.not_anomalous[:] = []
# self.features_profiles_checked[:] = []
# self.training_metrics[:] = []
# self.sent_to_panorama[:] = []
# @added 20170306 - Feature #1960: ionosphere_layers
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.layers_checked[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
delete_redis_sets = [
'ionosphere.anomalous_metrics',
'ionosphere.not_anomalous',
'ionosphere.features_profiles_checked',
'ionosphere.training_metrics',
'ionosphere.sent_to_panorama',
'ionosphere.layers_checked',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
ionosphere_job = False
learn_job = False
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if the namespace is a declared SKYLINE_FEEDBACK_NAMESPACES
# namespace that has been checked in the last 10 minutes if
# there are multiple checks to do.
rate_limit_feedback_metrics = False
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
metric_var_files_count = len(metric_var_files_sorted)
if metric_var_files_count > 2:
rate_limit_feedback_metrics = True
if rate_limit_feedback_metrics:
for i_metric_check_file in metric_var_files_sorted:
feedback_metric = False
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched on to_skip %s in base_name %s' % (to_skip, base_name))
break
to_skip_namespace_elements = to_skip.split('.')
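# A namespace is also treated as matched when every dotted element of
# the to_skip namespace appears somewhere in the metric name, for
# example a hypothetical to_skip of 'skyline.analyzer' would match a
# base_name of 'skyline.prod.analyzer.run_time' because both elements
# are present.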
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched elements in %s' % base_name)
break
if feedback_metric:
remove_feedback_metric_check = False
if metric_var_files_count > 4:
logger.info('rate limiting feedback metric, removing check for %s as Ionosphere has %s pending checks, not checking feedback metric' % (
base_name, str(metric_var_files_count)))
remove_feedback_metric_check = True
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
check_done = False
try:
check_done = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('not removing feedback metric as no check has been done in last 600 seconds on %s' % base_name)
else:
logger.info('rate limiting feedback metric, removing check as %s has been checked in the last 600 seconds' % (
base_name))
remove_feedback_metric_check = True
if remove_feedback_metric_check:
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, i_metric_check_file)
self.remove_metric_check_file(str(metric_check_file))
# Determine metric_var_files after possible feedback metric removals
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
if metric_var_files:
ionosphere_job = True
break
# @added 20170113 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
work_queue_items = 0
if settings.IONOSPHERE_LEARN:
learn_work = None
try:
learn_work = self.redis_conn.smembers('ionosphere.learn.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.learn.work - %s' % e)
if learn_work:
work_queue_items = len(learn_work)
if work_queue_items > 0:
learn_job = True
if learn_job:
break
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Do not run an Ionosphere and echo checks on a metrics when a lot of
# checks are being done. Manage the Ionosphere load and increased
# runtime in general that Ionosphere echo has introduced, especially
# when Ionosphere is issued lots of checks, if lots of metrics suddenly
# become anomalous.
metric_var_files_count = 0
ionosphere_busy = False
if ionosphere_job:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added a count of the number of checks to be done
metric_var_files_count = len(metric_var_files)
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
# @added 20170108 - Feature #1830: Ionosphere alerts
# Adding lists of smtp_alerter_metrics and ionosphere_non_smtp_alerter_metrics
# Timed this takes 0.013319 seconds on 689 unique_metrics
unique_metrics = []
try:
unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the unique_metrics list from Redis')
unique_metrics = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis analyzer.smtp_alerter_metrics list is created here to
# replace the self.ionosphere_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_smtp_alerter_metrics = []
try:
ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
redis_sets_to_rename = [
'ionosphere.ionosphere_smtp_alerter_metrics',
'ionosphere.ionosphere_non_smtp_alerter_metrics'
]
for current_redis_set in redis_sets_to_rename:
new_redis_set = '%s.old' % current_redis_set
try:
self.redis_conn.rename(current_redis_set, new_redis_set)
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
else:
logger.error('error :: could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
for metric_name in unique_metrics:
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
for alert in settings.ALERTS:
pattern_match = False
if str(alert[1]) == 'smtp':
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = base_name
pattern_match = False
try:
# Match by regex
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
pattern_match = True
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
if base_name not in ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
pattern_match = False
if not pattern_match:
# Match by substring
if alert[0] in base_name:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_non_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_non_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis lists are used here to replace the self.ionosphere_
# Manager().list()
ionosphere_smtp_alerter_metrics = []
try:
ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
ionosphere_non_smtp_alerter_metrics = []
try:
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# logger.info('smtp_alerter_metrics :: %s' % str(len(self.ionosphere_smtp_alerter_metrics)))
# logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(self.ionosphere_non_smtp_alerter_metrics)))
logger.info('smtp_alerter_metrics :: %s' % str(len(ionosphere_smtp_alerter_metrics)))
logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(ionosphere_non_smtp_alerter_metrics)))
if ionosphere_job:
# @added 20190326 - Feature #2484
# First process ionosphere_echo to create any missing echo features profiles
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 2 metric check files, do not run
# process_ionosphere_echo to create echo features profiles
run_process_ionosphere_echo = True
if metric_var_files_count > 2:
run_process_ionosphere_echo = False
logger.info(
'not running process_ionosphere_echo as there are %s metric check files to be checked' % (
str(metric_var_files_count)))
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
# Branch #3002: docker
# Only process if there is a ionosphere.unique_metrics Redis set
if run_process_ionosphere_echo:
ionosphere_unique_metrics = []
try:
ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis smembers ionosphere.unique_metrics')
ionosphere_unique_metrics = []
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
if not ionosphere_unique_metrics:
run_process_ionosphere_echo = False
logger.info('there are no metrics in the Redis ionosphere.unique_metrics set, skipping process_ionosphere_echo')
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
if ionosphere_echo_enabled and run_process_ionosphere_echo:
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
logger.info('processing - %s' % str(metric_var_files_sorted[0]))
function_name = 'spin_process'
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
# @added 20170112 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
# Ionosphere learn needs Redis works sets
# When a features profile is created there needs to be work added to a Redis
# set
# When a human makes a features profile, we want Ionosphere to make a
# use_full_duration_days features profile valid_learning_duration (e.g.
# 3361) later.
if learn_job:
logger.info('processing - learn work queue - %s' % str(work_queue_items))
function_name = 'spawn_learn_process'
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
now = time()
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# for i in range(1, settings.IONOSPHERE_PROCESSES + 1):
for i in range(1, IONOSPHERE_PROCESSES + 1):
if ionosphere_job:
try:
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_busy if there are queued checks
# to ensure that Ionosphere echo is rate limited if a
# lot of metrics become anomalous and that Ionosphere
# alternates between normal Mirage features profiles
# comparisons and Ionosphere echo features profiles
# during busy times.
# p = Process(target=self.spin_process, args=(i, metric_check_file))
p = Process(target=self.spin_process, args=(i, metric_check_file, ionosphere_busy))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# @added 20170113 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
if learn_job:
try:
p = Process(target=self.spawn_learn_process, args=(i, int(now)))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor processes and terminate if any spin_process has run
# for too long
p_starts = time()
# @modified 20180621 - Feature #2404: Ionosphere - fluid approximation
# Increase run time to 55 seconds to allow for Min-Max scaling
# while time() - p_starts <= 20:
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_echo which takes more time
# while time() - p_starts <= 55:
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
while time() - p_starts <= ionosphere_max_runtime:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
if ionosphere_job:
check_file_name = os.path.basename(str(metric_check_file))
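# Check file names take the form <timestamp>.<metric.namespace>.txt
# (e.g. 1562310000.stats.server-1.cpu.user.txt - an illustrative name, not from
# this code), so the timestamp and metric name are recovered below by splitting
# on the first '.'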
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
for p in pids:
if p.is_alive():
logger.info('stopping %s - %s' % (function_name, str(p.is_alive())))
p.join()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Reset added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.ionosphere_smtp_alerter_metrics[:] = []
# self.ionosphere_non_smtp_alerter_metrics[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# delete_redis_sets = [
# 'ionosphere.ionosphere_smtp_alerter_metrics',
# 'ionosphere.ionosphere_non_smtp_alerter_metrics',
# ]
delete_redis_sets = [
'ionosphere.ionosphere_smtp_alerter_metrics.old',
'ionosphere.ionosphere_non_smtp_alerter_metrics.old',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
|
test_cuttlepool.py
|
# -*- coding: utf-8 -*-
"""
CuttlePool tests.
"""
import gc
import threading
import time
import pytest
# Travis CI uses pytest v2.9.2 for Python 3.3 tests. Any fixtures that yield
# a resource using pytest <= v2.9.2 should use yield_fixture explicitly,
# otherwise use fixture as per the docs.
if int(pytest.__version__.split('.')[0]) >= 3:
pytest.yield_fixture = pytest.fixture
from cuttlepool import (_ResourceTracker, CuttlePool, Resource, PoolEmptyError,
PoolFullError)
import mockresource
class MockPool(CuttlePool):
def normalize_resource(self, resource):
pass
def ping(self, resource):
return resource.open
class SubResource(Resource):
pass
@pytest.fixture
def capacity():
return 5
@pytest.fixture
def overflow():
return 1
@pytest.fixture
def pool(capacity, overflow):
"""A CuttlePool instance."""
p = MockPool(mockresource.factory, capacity=capacity, overflow=overflow)
return p
@pytest.fixture
def rtracker(pool):
"""A _ResourceTracker instance."""
rt, _ = pool._make_resource()
return rt
@pytest.yield_fixture
def resource(pool):
"""A Resource instance."""
r = pool.get_resource()
yield r
r.close()
def test_nonpositive_capacity():
"""Test error is raised when nonpositive capacity is specified."""
with pytest.raises(ValueError):
MockPool(mockresource.factory, capacity=0)
def test_negative_overflow():
"""Test error is raised when negative overflow is specified."""
with pytest.raises(ValueError):
MockPool(mockresource.factory, capacity=1, overflow=-1)
def test_improper_timeout():
"""Test error is raised for improper timeout argument."""
with pytest.raises(ValueError):
MockPool(mockresource.factory, capacity=1, timeout=-1)
with pytest.raises(TypeError):
MockPool(mockresource.factory, capacity=1, timeout=-0.1)
def test_resource_wrapper():
"""
Test the proper Resource subclass is returned from ``get_resource()``.
"""
pool = MockPool(
mockresource.factory, capacity=1, resource_wrapper=SubResource)
r = pool.get_resource()
assert isinstance(r, SubResource)
def test_empty(pool):
"""Test if pool is empty."""
assert pool.empty()
r = pool.get_resource()
assert pool.empty()
r.close()
assert not pool.empty()
def test_resource_wrapper_get_resource(pool):
"""
Test the proper Resource subclass is returned from ``get_resource()``.
"""
r = pool.get_resource(resource_wrapper=SubResource)
assert isinstance(r, SubResource)
def test_get_empty(pool):
"""Test the pool raises a ``PoolEmptyError``."""
with pytest.raises(PoolEmptyError):
pool._get(0)
def test_get(pool, resource):
"""Test ``_get()`` gets a resource."""
resource.close() # Returns resource to pool.
rt, _ = pool._get(0)
assert isinstance(rt, _ResourceTracker)
def test_get_wait():
def worker(r):
time.sleep(5)
r.close()
pool = MockPool(mockresource.factory, capacity=1)
resource = pool.get_resource()
t = threading.Thread(target=worker, args=(resource, ))
t.start()
rt, _ = pool._get(None)
assert isinstance(rt, _ResourceTracker)
def test_get_tracker(pool, rtracker):
"""Test the resource tracker for a resource is returned."""
rt = pool._get_tracker(rtracker.resource)
assert rt is rtracker
def test_harvest_lost_resources(pool):
"""Test unreferenced resources are returned to the pool."""
def get_resource_id():
"""
Ensures ``Resource`` falls out of scope before calling
``_harvest_lost_resources()``.
"""
return id(pool.get_resource()._resource)
r_id = get_resource_id()
# Run garbage collection to ensure ``Resource`` created in
# ``get_resource_id()`` is destroyed.
gc.collect()
pool._harvest_lost_resources()
assert r_id == id(pool.get_resource()._resource)
def test_make_resource(pool):
"""
Test the resource object returned from _make_resource is the proper class
instance.
"""
r, _ = pool._make_resource()
assert pool.size == 1
assert isinstance(r, _ResourceTracker)
def test_put_full():
"""Test ``PoolFullError`` is raised."""
pool = MockPool(mockresource.factory, capacity=1, overflow=1)
r1 = pool.get_resource()
r2 = pool.get_resource()
print(pool.capacity, pool._available)
pool._put(pool._get_tracker(r1._resource))
with pytest.raises(PoolFullError):
pool._put(pool._get_tracker(r2._resource))
def test_put(pool, rtracker):
"""Test ``_put()`` returns resource to pool."""
assert pool._available == 0
pool._put(rtracker)
assert pool._available == 1
def test_remove(pool, rtracker):
"""Test ``_remove()`` removes resource from pool."""
pool._remove(rtracker)
assert pool.size == pool._available == 0
assert list(filter(None, pool._reference_queue)) == []
def test_unavailable_range(pool):
"""Test proper generator returned for unused pool."""
assert list(pool._unavailable_range()) == [x for x in range(pool.maxsize)]
def test_unavailable_range_depleted_pool(pool):
"""Test generator when pool is depleted."""
resources = [pool.get_resource() for _ in range(pool.maxsize)]
assert list(pool._unavailable_range()) == [x for x in range(pool.maxsize)]
def test_unavailable_range_wraps(pool, capacity):
"""
Test generator uses correct indices when ``_resource_start`` is less than
``_resource_end``.
"""
# Create capacity resources, then return them to the pool. This makes
# _resource_end == capacity.
resources = [pool.get_resource() for _ in range(capacity)]
[r.close() for r in resources]
# Get a resource, which makes _resource_start == 1.
r = pool.get_resource()
# The unavailable range starts at _resource_end (5) and wraps around to
# _resource_start (1, exclusive).
unavailable = list(range(capacity, pool.maxsize))
unavailable.extend(range(pool._resource_start))
assert list(pool._unavailable_range()) == unavailable
def test_get_resource(pool):
"""
Test the resource object returned from get_resource is the
proper class instance.
"""
r = pool.get_resource()
assert isinstance(r, Resource)
def test_get_resource_overflow(pool):
"""
Test the pool creates the proper number of overflow resources.
"""
rs = []
for _ in range(pool.maxsize):
rs.append(pool.get_resource())
assert pool.size == pool.maxsize
for r in rs:
r.close()
assert pool.size == pool.capacity
def test_get_resource_depleted(pool):
"""Test the pool will return a resource once one is available."""
def worker(pool):
r = pool.get_resource()
time.sleep(5)
r.close()
for _ in range(pool.maxsize):
t = threading.Thread(target=worker, args=(pool, ))
t.start()
time.sleep(2)
r = pool.get_resource()
def test_get_resource_depleted_error():
"""Test the pool will raise an error when depleted."""
pool = MockPool(mockresource.factory, capacity=1, timeout=1)
with pytest.raises(PoolEmptyError):
rt = []
while True:
rt.append(pool.get_resource())
def test_normalize_resource():
"""
Test that the normalize_resource method is properly called on
resources returned from get_resource.
"""
class Normalize(MockPool):
def normalize_resource(self, resource):
setattr(resource, 'one', 1)
pool = Normalize(mockresource.factory, capacity=1)
r = pool.get_resource()
r_id = id(r._resource)
r.one = 2
assert r.one == 2
r.close()
r2 = pool.get_resource()
r2_id = id(r2._resource)
assert (r2.one == 1 and r_id == r2_id)
def test_ping(pool):
"""
Test that the ping method is properly called on resources returned
from get_resource.
"""
r = pool.get_resource()
r_id = id(r._resource)
r._resource.close() # Close the underlying resource object.
r.close() # Return the resource to the pool.
# Calling get_resource() should create a new resource object since
# the previous one (which is the only one currently in the pool) is not
# open.
r2 = pool.get_resource()
r2_id = id(r2._resource)
assert r_id != r2_id
def test_put_resource(pool):
"""
Test that the resource is properly returned to the pool.
"""
r = pool.get_resource()
r_id = id(r._resource)
pool.put_resource(r._resource)
assert id(pool.get_resource()._resource) == r_id
def test_with_resource(pool):
"""Test Resource context manager."""
with pool.get_resource() as r:
assert isinstance(r, Resource)
assert r._resource is None
r2 = pool.get_resource()
with r2:
assert isinstance(r2, Resource)
assert r2._resource is None
def test_resource_available(pool, rtracker):
"""
Test a resource is properly tracked by a ``_ResourceTracker`` instance.
"""
assert rtracker.available()
r = rtracker.wrap_resource(pool, Resource)
assert not rtracker.available()
del r
gc.collect()
assert rtracker.available()
def test_wrap_resource(pool, rtracker):
"""
Test a resource is properly wrapped and referenced by
``_ResourceTracker``.
"""
r = rtracker.wrap_resource(pool, Resource)
assert isinstance(r, Resource)
assert rtracker._weakref() is not None
def test_resource_getattr_setattr(resource):
"""Test that attributes are set on the underlying resource object."""
resource.one = 1
assert resource.one == 1
assert 'one' not in resource.__dict__
assert resource._resource.one == 1
assert 'one' in resource._resource.__dict__
assert resource.one == resource._resource.one
def test_close(pool):
"""Test the close method of a Resource object."""
r = pool.get_resource()
r.close()
assert r._resource is None
assert r._pool is None
def test_recycling(pool):
"""
Test no errors are raised for multiple rounds of getting and putting. Kind
of a "catch all" to make sure no errors crop up when resources are
recycled.
"""
# Recycle pool repeatedly in single thread.
for _ in range(5):
rs = [pool.get_resource() for _ in range(pool.maxsize)]
# Close resource in different order than retrieved.
rs.reverse()
for r in rs:
r.close()
# Recycle pool repeatedly in multiple threads.
def worker(pool):
for _ in range(5):
r = pool.get_resource()
r.close()
threads = []
for _ in range(5):
t = threading.Thread(target=worker, args=(pool, ))
t.start()
threads.append(t)
for t in threads:
t.join()
assert pool._available == pool.size == pool.capacity
|
HTTPDownloader.py
|
# Written by John Hoffman
# see LICENSE.txt for license information
from BitTornado.CurrentRateMeasure import Measure
from random import randint
from urlparse import urlparse
from httplib import HTTPConnection
from urllib import quote
from threading import Thread
from BitTornado.__init__ import product_name,version_short
try:
True
except:
True = 1
False = 0
EXPIRE_TIME = 60 * 60
VERSION = product_name+'/'+version_short
class haveComplete:
def complete(self):
return True
def __getitem__(self, x):
return True
haveall = haveComplete()
class SingleDownload:
def __init__(self, downloader, url):
self.downloader = downloader
self.baseurl = url
try:
(scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
except:
self.downloader.errorfunc('cannot parse http seed address: '+url)
return
if scheme != 'http':
self.downloader.errorfunc('http seed url not http: '+url)
return
try:
self.connection = HTTPConnection(self.netloc)
except:
self.downloader.errorfunc('cannot connect to http seed: '+url)
return
self.seedurl = path
if pars:
self.seedurl += ';'+pars
self.seedurl += '?'
if query:
self.seedurl += query+'&'
self.seedurl += 'info_hash='+quote(self.downloader.infohash)
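# At this point self.seedurl has the form
# <path>[;params]?[query&]info_hash=<quoted infohash>
# and download() below appends '&piece=<n>' plus, for partial pieces,
# '&ranges=<range list>' per request (layout inferred from the surrounding code).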
self.measure = Measure(downloader.max_rate_period)
self.index = None
self.url = ''
self.requests = []
self.request_size = 0
self.endflag = False
self.error = None
self.retry_period = 30
self._retry_period = None
self.errorcount = 0
self.goodseed = False
self.active = False
self.cancelled = False
self.resched(randint(2,10))
def resched(self, len = None):
if len is None:
len = self.retry_period
if self.errorcount > 3:
len = len * (self.errorcount - 2)
self.downloader.rawserver.add_task(self.download, len)
def _want(self, index):
if self.endflag:
return self.downloader.storage.do_I_have_requests(index)
else:
return self.downloader.storage.is_unstarted(index)
def download(self):
self.cancelled = False
if self.downloader.picker.am_I_complete():
self.downloader.downloads.remove(self)
return
self.index = self.downloader.picker.next(haveall, self._want)
if ( self.index is None and not self.endflag
and not self.downloader.peerdownloader.has_downloaders() ):
self.endflag = True
self.index = self.downloader.picker.next(haveall, self._want)
if self.index is None:
self.endflag = True
self.resched()
else:
self.url = ( self.seedurl+'&piece='+str(self.index) )
self._get_requests()
if self.request_size < self.downloader.storage._piecelen(self.index):
self.url += '&ranges='+self._request_ranges()
rq = Thread(target = self._request)
rq.setDaemon(False)
rq.start()
self.active = True
def _request(self):
import encodings.ascii
import encodings.punycode
import encodings.idna
self.error = None
self.received_data = None
try:
self.connection.request('GET',self.url, None,
{'User-Agent': VERSION})
r = self.connection.getresponse()
self.connection_status = r.status
self.received_data = r.read()
except Exception, e:
self.error = 'error accessing http seed: '+str(e)
try:
self.connection.close()
except:
pass
try:
self.connection = HTTPConnection(self.netloc)
except:
self.connection = None # will cause an exception and retry next cycle
self.downloader.rawserver.add_task(self.request_finished)
def request_finished(self):
self.active = False
if self.error is not None:
if self.goodseed:
self.downloader.errorfunc(self.error)
self.errorcount += 1
if self.received_data:
self.errorcount = 0
if not self._got_data():
self.received_data = None
if not self.received_data:
self._release_requests()
self.downloader.peerdownloader.piece_flunked(self.index)
if self._retry_period:
self.resched(self._retry_period)
self._retry_period = None
return
self.resched()
def _got_data(self):
if self.connection_status == 503: # seed is busy
try:
self.retry_period = max(int(self.received_data),5)
except:
pass
return False
if self.connection_status != 200:
self.errorcount += 1
return False
self._retry_period = 1
if len(self.received_data) != self.request_size:
if self.goodseed:
self.downloader.errorfunc('corrupt data from http seed - redownloading')
return False
self.measure.update_rate(len(self.received_data))
self.downloader.measurefunc(len(self.received_data))
if self.cancelled:
return False
if not self._fulfill_requests():
return False
if not self.goodseed:
self.goodseed = True
self.downloader.seedsfound += 1
if self.downloader.storage.do_I_have(self.index):
self.downloader.picker.complete(self.index)
self.downloader.peerdownloader.check_complete(self.index)
self.downloader.gotpiecefunc(self.index)
return True
def _get_requests(self):
self.requests = []
self.request_size = 0L
while self.downloader.storage.do_I_have_requests(self.index):
r = self.downloader.storage.new_request(self.index)
self.requests.append(r)
self.request_size += r[1]
self.requests.sort()
def _fulfill_requests(self):
start = 0L
success = True
while self.requests:
begin, length = self.requests.pop(0)
if not self.downloader.storage.piece_came_in(self.index, begin,
self.received_data[start:start+length]):
success = False
break
start += length
return success
def _release_requests(self):
for begin, length in self.requests:
self.downloader.storage.request_lost(self.index, begin, length)
self.requests = []
def _request_ranges(self):
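# Build a comma-separated list of byte ranges for the pending requests,
# coalescing contiguous requests, e.g. '0-16383,32768-49151' (values illustrative).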
s = ''
begin, length = self.requests[0]
for begin1, length1 in self.requests[1:]:
if begin + length == begin1:
length += length1
continue
else:
if s:
s += ','
s += str(begin)+'-'+str(begin+length-1)
begin, length = begin1, length1
if s:
s += ','
s += str(begin)+'-'+str(begin+length-1)
return s
class HTTPDownloader:
def __init__(self, storage, picker, rawserver,
finflag, errorfunc, peerdownloader,
max_rate_period, infohash, measurefunc, gotpiecefunc):
self.storage = storage
self.picker = picker
self.rawserver = rawserver
self.finflag = finflag
self.errorfunc = errorfunc
self.peerdownloader = peerdownloader
self.infohash = infohash
self.max_rate_period = max_rate_period
self.gotpiecefunc = gotpiecefunc
self.measurefunc = measurefunc
self.downloads = []
self.seedsfound = 0
def make_download(self, url):
self.downloads.append(SingleDownload(self, url))
return self.downloads[-1]
def get_downloads(self):
if self.finflag.isSet():
return []
return self.downloads
def cancel_piece_download(self, pieces):
for d in self.downloads:
if d.active and d.index in pieces:
d.cancelled = True
|
run_benchmarks.py
|
import os, sys, subprocess, re, time, threading
last_benchmarked_commit_file = '.last_benchmarked_commit'
def log(msg):
print(msg)
FNULL = open(os.devnull, 'w')
benchmark_runner = os.path.join('build', 'release', 'benchmark', 'benchmark_runner')
out_file = 'out.csv'
log_file = 'out.log'
benchmark_results_folder = 'benchmark_results'
benchmark_info_folder = os.path.join(benchmark_results_folder, 'info')
default_start_commit = "9ea358b716b33a4929348dc96acbaa16436d83fa"
# 5 minute timeout per benchmark
total_timeout = 300
def get_current_git_version():
proc = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
return proc.stdout.readline().rstrip()
def pull_new_changes():
proc = subprocess.Popen(['git', 'pull'], stdout=FNULL)
proc.wait()
def build_optimized():
log("Starting optimized build")
proc = subprocess.Popen(['make', 'opt', 'imdb','-j'], stdout=FNULL, stderr=subprocess.PIPE)
proc.wait()
if proc.returncode != 0:
print("Failed to compile, moving on to next commit")
while True:
line = proc.stderr.readline()
if line == '':
break
print(line);
return False
else:
log("Finished optimized build")
return True
def get_list_of_commits(until_commit=None):
proc = subprocess.Popen(['git', 'checkout', 'origin/master'], stdout=subprocess.PIPE)
proc.wait()
list = []
commit_regex = re.compile('commit ([a-z0-9]{40})')
proc = subprocess.Popen(['git', 'log'], stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if line == '':
break
match = commit_regex.search(line)
if match != None:
commit_number = match.groups()[0]
if commit_number == until_commit:
break
list.append(commit_number)
return list
def switch_to_commit(commit_number):
proc = subprocess.Popen(['git', 'checkout', commit_number])
proc.wait()
return proc.returncode == 0
def get_benchmark_list():
list = []
proc = subprocess.Popen([benchmark_runner, '--list'], stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if line == '':
break
list.append(line.rstrip())
return list
# get a folder for the new benchmark
# folders have the format ID-commit
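# e.g. benchmark_results/0002-9ea358b716b33a4929348dc96acbaa16436d83fa,
# where the ID is zero-padded to four digits and increments on each run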
def make_benchmark_folder(commit):
# get all the current folders
files = os.listdir(benchmark_results_folder)
biggest_number = 0
for f in files:
try:
number = int(f.split('-')[0])
if number > biggest_number:
biggest_number = number
except:
pass
folder_name = os.path.join(benchmark_results_folder, "%04d-%s" % (biggest_number + 1, commit))
os.mkdir(folder_name)
return folder_name
class RunBenchmark(object):
def __init__(self, benchmark, file_name, log_file, stdout_name, stderr_name, out_file):
self.benchmark = benchmark
self.file_name = file_name
self.log_file = log_file
self.stdout_name = stdout_name
self.stderr_name = stderr_name
self.out_file = out_file
self.proc = None
def run(self, timeout):
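# Start the benchmark runner under an external 'timeout 300' wrapper and wait on
# it from a helper thread; if the thread is still alive after `timeout` seconds
# the process is force-killed and the run is treated as failed (returns 1).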
def run_benchmark_target(self):
self.proc.wait()
self.proc = subprocess.Popen(['timeout', '300', benchmark_runner, '--out=' + self.out_file, '--log=' + self.log_file, self.benchmark], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
thread = threading.Thread(target=run_benchmark_target, args=(self,))
thread.start()
thread.join(timeout)
self.stdout = self.proc.stdout.read()
self.stderr = self.proc.stderr.read()
if thread.is_alive():
log("Force terminating process...");
self.proc.kill()
thread.join()
return 1
return self.proc.returncode
def run_benchmark(benchmark, folder):
log("Starting benchmark " + benchmark);
base_path = os.path.join(folder, benchmark)
file_name = base_path + ".csv"
log_file = base_path + ".log"
stdout_name = base_path + ".stdout.log"
stderr_name = base_path + ".stderr.log"
runner = RunBenchmark(benchmark, file_name, log_file, stdout_name, stderr_name, out_file)
return_code = runner.run(total_timeout)
if return_code != 0:
log("Failed to run benchmark " + benchmark);
# failed to run benchmark
with open(runner.file_name, 'w+') as f:
f.write("CRASH")
else:
log("Succeeded in running benchmark " + benchmark);
# succeeded, copy results to output directory
os.rename(out_file, file_name)
with open(stdout_name, 'w+') as f:
f.write(runner.stdout)
with open(stderr_name, 'w+') as f:
f.write(runner.stderr)
def write_benchmark_info(benchmark, folder):
file = os.path.join(folder, benchmark + '.log')
# benchmark, write info
log("Write benchmark info " + benchmark);
proc = subprocess.Popen([benchmark_runner, '--info', benchmark], stdout=subprocess.PIPE)
output = proc.stdout.read()
print(output)
with open(file, 'w+') as f:
f.write(output)
if os.path.exists(last_benchmarked_commit_file):
with open(last_benchmarked_commit_file, 'r') as f:
default_start_commit = f.read().rstrip()
pull_new_changes()
# get a list of all commits to benchmark
list = get_list_of_commits(default_start_commit)
list.reverse()
list = list[0:1]
if len(list) == 0:
exit(1)
# create a folder for the benchmark results, if it doesn't exist yet
try:
os.mkdir(benchmark_results_folder)
os.mkdir(benchmark_info_folder)
except:
pass
for commit in list:
default_start_commit = commit
log("Benchmarking commit " + commit)
# switch to this commit in the source tree
if not switch_to_commit(commit):
log("Failed to switch to commit! Moving to next commit")
continue
# now try to compile it
if not build_optimized():
continue
# make a benchmark folder for this commit
benchmark_folder = make_benchmark_folder(commit)
log("Writing to folder: " + benchmark_folder)
# now run the benchmarks
benchmarks_to_run = get_benchmark_list()
for benchmark in benchmarks_to_run:
write_benchmark_info(benchmark, benchmark_info_folder)
run_benchmark(benchmark, benchmark_folder)
# successfully benchmarked this commit, write to file
with open(last_benchmarked_commit_file, 'w+') as f:
f.write(commit)
|
sauce.py
|
import csv
import os
import subprocess
import threading
# Gather the packages to test.
PREFIX = './packages/node_modules/'
CISCOSPARK = os.path.join(PREFIX, '@ciscospark')
WEBEX = os.path.join(PREFIX, '@webex')
PROD_ENV_VARS = {
# 'ACL_SERVICE_URL': 'https://acl-a.wbx2.com/acl/api/v1', ?
'ATLAS_SERVICE_URL': 'https://atlas-a.wbx2.com/admin/api/v1',
'CONVERSATION_SERVICE': 'https://conv-a.wbx2.com/conversation/api/v1',
'ENCRYPTION_SERVICE_URL': 'https://encryption-a.wbx2.com',
'IDBROKER_BASE_URL': 'https://idbroker.webex.com',
'IDENTITY_BASE_URL': 'https://identity.webex.com',
'U2C_SERVICE_URL': 'https://u2c.wbx2.com/u2c/api/v1',
'WDM_SERVICE_URL': 'https://wdm-a.wbx2.com/wdm/api/v1',
# Logging
'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
# Enable CI for Sauce Labs
'CI': 'true'
}
INT_ENV_VARS = {
# Environments
'ACL_SERVICE_URL': 'https://acl-intb.ciscospark.com/acl/api/v1',
'ATLAS_SERVICE_URL': 'https://atlas-intb.ciscospark.com/admin/api/v1',
'CONVERSATION_SERVICE': 'https://conversation-intb.ciscospark.com/conversation/api/v1',
'ENCRYPTION_SERVICE_URL': 'https://encryption-intb.ciscospark.com/encryption/api/v1',
# Do not use 'https://hydra-intb.ciscospark.com/v1' for Hydra. CI expects 'apialpha'.
'HYDRA_SERVICE_URL': 'https://apialpha.ciscospark.com/v1/',
'IDBROKER_BASE_URL': 'https://idbrokerbts.webex.com',
'IDENTITY_BASE_URL': 'https://identitybts.webex.com',
'U2C_SERVICE_URL': 'https://u2c-intb.ciscospark.com/u2c/api/v1',
'WDM_SERVICE_URL': 'https://wdm-intb.ciscospark.com/wdm/api/v1',
'WHISTLER_API_SERVICE_URL': 'https://whistler.onint.ciscospark.com/api/v1',
# Logging
'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
# Enable CI for Sauce Labs
'CI': 'true'
}
OUTPUT_DIR = 'output'
OUTPUT_FILE_PATH = os.path.join(OUTPUT_DIR, 'test-comparison.csv')
TEST_COMMAND = 'npm run sauce:run -- npm test -- --packages %s'
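# For example, TEST_COMMAND % '@webex/plugin-messages' expands to
# 'npm run sauce:run -- npm test -- --packages @webex/plugin-messages'
# (the package name here is only illustrative).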
SKIP_PACKAGES = [
'@webex/test-helper-server',  # no tests
'@webex/internal-plugin-calendar', # no tests
'@webex/plugin-webhooks' # no tests
]
def should_include_package(path_name, name):
scoped_name = os.path.join(os.path.basename(path_name), name)
return os.path.isdir(os.path.join(path_name, name)) and scoped_name not in SKIP_PACKAGES
def get_package_names(path_name):
namespace = path_name.replace(PREFIX, '')
return [os.path.join(namespace, name) for name in os.listdir(path_name) if should_include_package(path_name, name)]
def run_subprocess(bash_command, env_vars):
env = os.environ.copy()
env.update(env_vars)
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE, env=env)
output, error = process.communicate()
return process.returncode # , output, error
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_result(return_code, prefix='Tests are a...'):
if return_code == 0:
print(bcolors.OKGREEN + prefix + 'success.' + bcolors.ENDC)
else:
print(bcolors.FAIL + prefix + 'failure.' + bcolors.ENDC)
def run_test(package, environment):
env_vars = INT_ENV_VARS if environment == 'integration' else PROD_ENV_VARS
print(bcolors.OKBLUE + 'Testing `%s` on %s...' % (package, environment) + bcolors.ENDC)
bash_command = TEST_COMMAND % package
return_code = run_subprocess(bash_command, env_vars)
print_result(return_code, prefix='Testing `%s` on %s...' % (package, environment))
return return_code
def run_env_tests(package, writer, csv_file):
prod_return_code = run_test(package, 'production')
int_return_code = run_test(package, 'integration')
writer.writerow([package, prod_return_code, int_return_code])
csv_file.flush()
def run_tests_in_sequence(packages, writer, csv_file):
for package in packages:
run_env_tests(package, writer, csv_file)
def run_tests_in_parallel(packages, writer, csv_file):
threads = [threading.Thread(target=run_env_tests, args=(package, writer, csv_file)) for package in packages]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def main():
ciscospark_packages = get_package_names(CISCOSPARK)
webex_packages = get_package_names(WEBEX)
packages = ciscospark_packages + webex_packages
print ('Skipping %d packages: %s' % (len(SKIP_PACKAGES), ', '.join(SKIP_PACKAGES)))
print('Testing %d packages...' % len(packages))
try:
os.mkdir(OUTPUT_DIR)
except OSError:
pass
threads = []
with open(OUTPUT_FILE_PATH, 'wb') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
writer.writerow(['Package', 'Production exit code', 'Integration exit code'])
run_tests_in_sequence(packages, writer, csv_file)
print('Wrote output to: %s' % OUTPUT_FILE_PATH)
print('Done.')
if __name__ == "__main__":
main()
|
test_selenium.py
|
import re
import threading
import time
import unittest
from selenium import webdriver
from app import create_app, db
from app.models import Role, User, Post
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
try:
cls.client = webdriver.Chrome()
except:
pass
# skip these tests if the browser could not be started
if cls.client:
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
# create the database and populate with some fake data
db.create_all()
Role.insert_roles()
User.generate_fake(10)
Post.generate_fake(10)
# add an administrator user
admin_role = Role.query.filter_by(permissions=0xff).first()
admin = User(email='stan@example.com',
username='stan', password='cat',
role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
# start the Flask server in a thread
threading.Thread(target=cls.app.run).start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# stop the flask server and the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
# destroy database
db.drop_all()
db.session.remove()
# remove application context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
# navigate to home page
self.client.get('http://localhost:5000/')
self.assertTrue(re.search(r'新朋友', self.client.page_source))
# navigate to login page
self.client.find_element_by_link_text('登录').click()
self.assertTrue('<h1>登录</h1>' in self.client.page_source)
# login
self.client.find_element_by_name('email').\
send_keys('stan@example.com')
self.client.find_element_by_name('password').send_keys('cat')
self.client.find_element_by_name('submit').click()
self.assertTrue(re.search(r'stan', self.client.page_source))
# navigate to the user's profile page
self.client.find_element_by_link_text('资料').click()
self.assertTrue('<h1>stan</h1>' in self.client.page_source)
|
batch.py
|
"""
OEvent: Oscillation event detection and feature analysis.
batch.py - runs analysis on a set of files
Written by Sam Neymotin (samuel.neymotin@nki.rfmh.org) & Idan Tal (idan.tal@nki.rfmh.org)
References: Taxonomy of neural oscillation events in primate auditory cortex
https://doi.org/10.1101/2020.04.16.045021
"""
from pylab import *
import sys,os,numpy,subprocess
from math import ceil
import multiprocessing
import matplotlib.gridspec as gridspec
import shutil
from subprocess import Popen, PIPE, call
import pickle
myhost = os.uname()[1]
defQSZ = 1 # default queue size for batch
if myhost == 'cycle': defQSZ = 3 # if on cycle, have more RAM so bigger qsz
if myhost == 'a1dat': defQSZ = 32 # if on gcp a1dat have more RAM (3.75 TB)
# append line s to filepath fn
def appline (s,fn):
fp = open(fn,"a"); fp.write(s + "\n"); fp.close()
# check that the batch dir exists
def checkdir (d):
try:
if not os.path.exists(d): os.mkdir(d)
return True
except:
print("could not create directory :",d)
return False
# make a list of the sims that have already had their output saved; this can
# then be passed into batchRun to skip those sims (see the usage sketch after this function)
def getSkipList (whichParams):
lsec,lopt,lval = whichParams()
sidx,lskip = -1,[]
for i in range(len(lopt[0])):
if lopt[0][i] == 'simstr':
sidx = i
break
if sidx == -1:
print("no simstr found!")
return None
for i in range(len(lval)):
if os.path.exists("./data/" + lval[i][sidx] + "_.npz"):
lskip.append(i)
return lskip
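# Hedged usage sketch for getSkipList/batchRun (whichParams and llarg are
# caller-supplied; the names here are illustrative, not defined in this file):
#
#   lskip = getSkipList(whichParams)
#   batchRun(llarg, 'batch.log', skip=lskip)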
# run a batch using multiprocessing - which calls mpiexec - single simulation then split across nodes
# based on http://www.bryceboe.com/2011/01/28/the-python-multiprocessing-queue-and-large-objects/
def batchRun (lmyargs,blog,skip=[],qsz=defQSZ,bdir="./batch",pyf="load.py"):
if not checkdir(bdir): return False
jobs = multiprocessing.Queue()
shutil.copy(pyf, bdir) # make a backup copy of py file -- but use local copy since has dependencies
def myworker (jobs):
while True:
scomm = jobs.get()
if scomm == None: break
print("worker starting : " , scomm)
os.system(scomm) # worker function, invoked in a process.
for i in range(len(lmyargs)):
if i in skip: continue
cmd = "python3 " + pyf + " ";
args = lmyargs[i]
for arg in args: cmd += arg + ' '
print('command is',cmd)
appline(cmd,blog)
jobs.put(cmd)
workers = []
for i in range(qsz):
jobs.put(None)
tmp = multiprocessing.Process(target=myworker, args=(jobs,))
tmp.start()
workers.append(tmp)
for worker in workers: worker.join()
return jobs.empty()
#
def getfilesext (basedir,ext):
lfn = os.listdir(basedir)
lfn = [os.path.join(basedir,x) for x in lfn if x.endswith(ext)]
return lfn
def eventbatch (useMUA=False,outbasedir='data/nhpdat/spont/A1/oscoutnew/'):
print('running batch')
lmedthresh = [4.0]
lwinsz = [10]
loverlapth = [0.5]
lbipolar = [0] # [0, 1]
llarg = []
lfnA = getfilesext('data/nhpdat/spont/A1','.mat')
#lfnB = getfilesext('data/nhpdat/spont/Thal','.mat')
lfn = [x for x in lfnA]
#for x in lfnB: lfn.append(x)
freqmin = 0.25 #0.5
freqmax = 250.0
freqstep = 0.25 #0.5
useDynThresh = 0
dorun = doquit = 1
endfctr = 0.5
mspecwidth = 7.0
dolaggedcoh = docfc = dolaggedcohnoband = dosim = 0
for overlapth in loverlapth:
for medthresh in lmedthresh:
for winsz in lwinsz:
for bipolar in lbipolar:
for fn in lfn:
larg = [fn,str(bipolar),str(medthresh),str(winsz),str(overlapth),\
str(freqmin),str(freqmax), str(freqstep), str(useDynThresh),\
str(dorun), str(doquit), str(dolaggedcoh), str(mspecwidth),str(docfc),str(dolaggedcohnoband),\
str(endfctr),str(dosim),str(useMUA),outbasedir]
llarg.append(larg)
batchRun(llarg,'batch.log')
def simbatch (): # not used currently - did not finish setup of load.py for this
print('running batch')
lmedthresh = [4.0]
lwinsz = [10]
loverlapth = [0.5]
lbipolar = [0]
llarg = []
lfnA = getfilesext('data/nhpdat/spont/A1','.mat')
lfn = [x for x in lfnA]
freqmin = 0.25
freqmax = 250.0
freqstep = 0.25
useDynThresh = 0
dorun = doquit = 1
for overlapth in loverlapth:
for medthresh in lmedthresh:
for winsz in lwinsz:
for bipolar in lbipolar:
for fn in lfn:
larg = [fn,str(bipolar),str(medthresh),str(winsz),str(overlapth),\
str(freqmin),str(freqmax), str(freqstep), str(useDynThresh),\
str(dorun), str(doquit)]
llarg.append(larg)
batchRun(llarg,'batch.log')
def laggedcohbatch ():
# lagged coherence batch
medthresh = 4.0
winsz = 10
overlapth = 0.5
llarg = []
lfnA = getfilesext('data/nhpdat/spont/A1','.mat')
lfnB = getfilesext('data/nhpdat/spont/Thal','.mat')
lfn = [x for x in lfnA]
for x in lfnB: lfn.append(x)
freqmin = 0.5
freqmax = 250.0
freqstep = 0.5
useDynThresh = 0
dorun = doquit = dolaggedcoh = 1
bipolar = 0
for fn in lfn:
larg = [fn,str(bipolar),str(medthresh),str(winsz),str(overlapth),\
str(freqmin),str(freqmax), str(freqstep), str(useDynThresh),\
str(dorun), str(doquit), str(dolaggedcoh)]
llarg.append(larg)
batchRun(llarg,'batch.log',qsz=defQSZ)
def laggedcohnobandbatch ():
# lagged coherence without frequency bands (narrowband) batch
medthresh = 4.0
winsz = 10
overlapth = 0.5
llarg = []
lfnA = getfilesext('data/nhpdat/spont/A1','.mat')
lfnB = getfilesext('data/nhpdat/spont/Thal','.mat')
lfn = [x for x in lfnA]
for x in lfnB: lfn.append(x)
freqmin = 0.5
freqmax = 250.0
freqstep = 0.5
useDynThresh = 0
dorun = doquit = 1
dolaggedcoh = 0
mspecwidth = 7.0
docfc = 0
dolaggedcohnoband = 1
bipolar = 0
for fn in lfn:
larg = [fn,str(bipolar),str(medthresh),str(winsz),str(overlapth),\
str(freqmin),str(freqmax), str(freqstep), str(useDynThresh),\
str(dorun), str(doquit), str(dolaggedcoh),str(mspecwidth),str(docfc),str(dolaggedcohnoband)]
llarg.append(larg)
batchRun(llarg,'batch.log',qsz=int(defQSZ*1.5))
#
def loadddcv2 (skipcsd=False,skipbipolar=False,lar=['A1','STG'],basedir='data/nhpdat/spont/oscout'):
from nhpdat import getflayers
ddcv2={}
for ar in lar:
ddcv2[ar]={}
if ar == 'A1' or ar == 'Thal':
bdir = 'data/nhpdat/spont/A1/oscoutnew/'+ar
else:
bdir = 'data/hecog/spont/oscout/'
lfn = os.listdir(bdir)
for fn in lfn:
if fn.endswith('ddcv2.pkl'):
if skipbipolar and fn.count('bipolar_True') > 0: continue
if skipcsd and fn.count('bipolar_False') > 0: continue
if ar == 'A1':
fnorig = 'data/nhpdat/spont/'+ar + '/' + fn.split('_bipolar')[0]
#print(fnorig)
s2,g,i1 = getflayers(fnorig,abbrev=True)
if s2 == -1: continue
ddcv2[ar][fn] = pickle.load(open(bdir+'/'+fn,'rb'))
return ddcv2
#
def plotddcv2byband (ddcv2,ar,dkey,skipbipolar=True,clr='k',bins=30,xlab=r'$CV^2$',xl=(0,3),histtype='bar',lw=4):
lband = ['delta','theta','alpha','beta','lgamma','gamma','hgamma']
lval = []
for bdx,b in enumerate(lband):
v = []
for k in ddcv2[ar].keys():
if type(k)==str:
if skipbipolar and k.count('bipolar_True') > 0: continue
dcv2 = ddcv2[ar][k]
lchan = list(dcv2.keys())
lchan.sort()
for c in lchan:
if type(dcv2[c][b][dkey])==list:
if len(dcv2[c][b][dkey])>0 and type(dcv2[c][b][dkey][0])==list:
for l in dcv2[c][b][dkey]:
for x in l:
if not isnan(x):
v.append(x)
else:
for x in dcv2[c][b][dkey]:
if not isnan(x):
v.append(x)
else:
if not isnan(dcv2[c][b][dkey]):
v.append(dcv2[c][b][dkey])
ax = subplot(3,3,bdx+1)
hist(v,density=True,bins=bins,color=clr,histtype=histtype,linewidth=lw)
s = ar + ' ' + b + '\nmedian:' + str(round(median(v),2))+ ' mean:' + str(round(mean(v),2))
title(s)#,fontsize=45)
if xl is not None: xlim(xl)
mv = mean(v)
plot([mv,mv],[0,ax.get_ylim()[1]],'r--')
md = median(v)
plot([md,md],[0,ax.get_ylim()[1]],'b--')
if b == 'gamma' or b == 'hgamma': xlabel(xlab)#,fontsize=45)
lval.append(v)
return lval
#
def plotddcv2bybandchan (ddcv2,ar,dkey,skipbipolar=True,clr='k',bins=30,xlab=r'$CV^2$',xl=(0,3),histtype='bar',lw=4):
lband = ['delta','theta','alpha','beta','lgamma','gamma','hgamma']
for bdx,b in enumerate(lband):
v = []
print(ddcv2[ar].keys())
for chan in ddcv2[ar].keys():
dcv2 = ddcv2[ar][chan]
print(b,chan,dkey,dcv2.keys())
if type(dcv2[b][dkey])==list:
for x in dcv2[b][dkey]:
if not isnan(x):
v.append(x)
else:
if not isnan(dcv2[b][dkey]):
v.append(dcv2[b][dkey])
subplot(3,2,bdx+1)
hist(v,density=True,bins=bins,color=clr,histtype=histtype,linewidth=lw)
s = ar + ' ' + b + ' median:' + str(round(median(v),2))+ ' mean:' + str(round(mean(v),2))
title(s)
xlim(xl)
if b == 'gamma' or b == 'hgamma': xlabel(xlab)
#
def loaddframebyarband (lcol,skipbipolar=True,skipcsd=False,FoctTH=1.5,ERPscoreTH=0.8,ERPDurTH=[75,300]):
# loads the pandas data frames split up by frequency band
from nhpdat import getorigsampr
lband = ['delta','theta','alpha','beta','lgamma','gamma','hgamma']
lar = ['A1', 'Thal']
based = 'data/nhpdat/spont/oscout/'
ddf = {'A1':{'s2':{},'g':{},'i1':{}},'Thal':{'Th':{}}}
for ar,lschan in zip(lar,[['s2','g','i1'],['Th']]):
for schan in lschan:
for b in lband:
ddf[ar][schan][b]={k:[] for k in lcol}
for ar in lar:
for fn in os.listdir(based+ar):
if getorigsampr('data/nhpdat/spont/'+ar+'/'+fn.split('_')[0]) != 44e3: continue
if not fn.endswith('dframe.pkl'): continue
if skipbipolar and fn.count('bipolar_True')>0: continue
if skipcsd and fn.count('bipolar_False')>0: continue
df = pickle.load(open(based+ar+'/'+fn,'rb'))
print(fn)
lchan = list(set(df['chan']))
lchan.sort()
if ar == 'A1':
s2,g,i1 = lchan
lschan = ['s2','g','i1']
else:
th = lchan[0]
lschan = ['Th']
for band in lband:
for chan,schan in zip(lchan,lschan):
dfs = df[(df.band==band) & (df.Foct<FoctTH) & (df.chan==chan) & ((df.ERPscore<ERPscoreTH)|(df.dur<ERPDurTH[0])|(df.dur>ERPDurTH[1]))]
for k in lcol:
lx = dfs[k]
for x in lx: ddf[ar][schan][band][k].append(x)
return ddf
# plot
def plotdframebyarband (ddf,kcol,lband=['delta','theta','alpha','beta','lgamma','gamma','hgamma'],\
lar=['A1','STG'],llschan=[['s2','g','i1'],['104']],\
llclr=[['r','g','b'],['c']],\
llab=['A1 supragran','A1 gran','A1 infragran','Human STG'],lcflat=['r','g','b','c'],drawlegend=True,ylab=None,msz=40):
import matplotlib.patches as mpatches
dtitle = {b:'' for b in lband}
dlm = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # 'A1':{'s2':[],'g':[],'i1':[]},'Thal':{'Th':[]}}
dls = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)}
from nhpdat import dbands
xfreq = [(dbands[k][1]+dbands[k][0])/2. for k in dbands.keys()]
for ar,lsch,lclr in zip(lar,llschan,llclr):
for schan in lsch:
for bdx,b in enumerate(lband):
dlm[ar][schan].append(mean(ddf[ar][schan][b][kcol]))
dls[ar][schan].append(std(ddf[ar][schan][b][kcol])/sqrt(len(ddf[ar][schan][b][kcol])))
for ar,lsch,lclr in zip(lar,llschan,llclr):
for schan,clr in zip(lsch,lclr):
plot(xfreq,np.array(dlm[ar][schan])-dls[ar][schan],clr+'--')
plot(xfreq,np.array(dlm[ar][schan])+dls[ar][schan],clr+'--')
plot(xfreq,dlm[ar][schan],clr)
plot(xfreq,dlm[ar][schan],clr+'o',markersize=msz)
xlabel('Frequency (Hz)');
if ylab is None:
ylabel(kcol)
else:
ylabel(ylab)
ax=gca()
lpatch = [mpatches.Patch(color=c,label=s) for c,s in zip(lcflat,llab)]
if drawlegend: ax.legend(handles=lpatch,handlelength=1)
return dlm,dls
# plot
def plotdframebyarbandhist (ddf,kcol,lband=['delta','theta','alpha','beta','lgamma','gamma','hgamma'],xl=None,xlab=None,ylab=None,\
lar=['A1','Thal'],llschan=[['s2','g','i1'],['Th']],\
llclr=[['r','g','b'],['c']],\
llab=['A1 supragran','A1 gran','A1 infragran','Thal'],lcflat=['r','g','b','c'],bins=20):
import matplotlib.patches as mpatches
dtitle = {b:'' for b in lband}
dlm = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # mean
dls = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # standard error
dlmin = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # min
dlmax = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # max
dlmed = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # median
dlN = {ar:{ch:[] for ch in lsch} for ar,lsch in zip(lar,llschan)} # median
from nhpdat import dbands
xfreq = [(dbands[k][1]+dbands[k][0])/2. for k in dbands.keys()]
for ar,lsch,lclr in zip(lar,llschan,llclr):
for schan,clr in zip(lsch,lclr):
for bdx,b in enumerate(lband):
subplot(3,2,bdx+1); title(b)
hist(ddf[ar][schan][b][kcol],density=True,histtype='step',linewidth=10,color=clr,bins=bins)
if xl is not None: xlim(xl)
if xlab is not None: xlabel(xlab)
if ylab is not None: ylabel(ylab)
dlm[ar][schan].append(mean(ddf[ar][schan][b][kcol]))
dls[ar][schan].append(std(ddf[ar][schan][b][kcol])/sqrt(len(ddf[ar][schan][b][kcol])))
dlmin[ar][schan].append(min(ddf[ar][schan][b][kcol]))
dlmax[ar][schan].append(max(ddf[ar][schan][b][kcol]))
dlmed[ar][schan].append(median(ddf[ar][schan][b][kcol]))
dlN[ar][schan].append(len(ddf[ar][schan][b][kcol]))
print(ar,schan,clr,b,kcol,dlN[ar][schan][-1],dlmin[ar][schan][-1],dlmax[ar][schan][-1],dlmed[ar][schan][-1],dlm[ar][schan][-1],dls[ar][schan][-1])
ax=gca()
lpatch = [mpatches.Patch(color=c,label=s) for c,s in zip(lcflat,llab)]
ax.legend(handles=lpatch,handlelength=1)
return dlm,dls,dlmin,dlmax,dlmed,dlN
#
def loaddlcoh (lband = ['delta','theta','alpha','beta','gamma','hgamma'], skipbipolar = True,\
ar='A1', bdir='data/nhpdat/spont/laggedcoh/A1',origdir='data/nhpdat/spont/A1/',lschan=['s2','g','i1']):
# loads lagged coherence output into dictionaries
from nhpdat import getorigsampr
ddlcoh = {}
ddlcoh[ar] = {}
lfn = os.listdir(bdir)
for fn in lfn:
if skipbipolar and fn.count('bipolar_True') > 0: continue
origfn = origdir+fn.split('_')[0]
if ar == 'A1' and getorigsampr(origfn) < 44e3: continue
if fn.endswith('.pkl'): ddlcoh[ar][fn] = pickle.load(open(bdir+'/'+fn,'rb'))
dlcoh = {ar:{schan:{} for schan in lschan}}
for c in lschan:
for b in lband:
dlcoh[ar][c][b]=[]
for k in ddlcoh[ar].keys():
for chan,schan in zip(ddlcoh[ar][k].keys(),lschan):
for b in lband:
for x in ddlcoh[ar][k][chan][b]: dlcoh[ar][schan][b].append(x)
return ddlcoh,dlcoh
def plotdlcoh (dlcoh,lband=['delta','theta','alpha','beta','gamma','hgamma'],\
ar='A1',lschan=['s2','g','i1'],lclr=['r','g','b'],dolegend=True):
# plot lagged coherence output as line plot
import matplotlib.patches as mpatches
dlm = {ar:{schan:[] for schan in lschan}}
dls = {ar:{schan:[] for schan in lschan}}
from nhpdat import dbands
xfreq = [(dbands[k][1]+dbands[k][0])/2. for k in dbands.keys()]
for ar,lsch,lclr in zip([ar],[lschan],[lclr]):
for schan in lsch:
for bdx,b in enumerate(lband):
dlm[ar][schan].append(mean(dlcoh[ar][schan][b]))
dls[ar][schan].append(std(dlcoh[ar][schan][b])/sqrt(len(dlcoh[ar][schan][b])))
for ar,lsch,lclr in zip([ar],[lschan],[lclr]):
for schan,clr in zip(lsch,lclr):
plot(xfreq,np.array(dlm[ar][schan])-dls[ar][schan],clr+'--')
plot(xfreq,np.array(dlm[ar][schan])+dls[ar][schan],clr+'--')
plot(xfreq,dlm[ar][schan],clr)
plot(xfreq,dlm[ar][schan],clr+'o',markersize=40)
xlabel('Frequency (Hz)',fontsize=45); ylabel('Lagged Coherence',fontsize=45)
if dolegend:
ax=gca()
lpatch = [mpatches.Patch(color=c,label=s) for c,s in zip(lclr,['NHP A1 supragranular','NHP A1 granular','NHP A1 infragranular'])]
ax.legend(handles=lpatch,handlelength=1)
return dlm,dls
if __name__ == "__main__":
batchty = 0
useMUA = 0
outbasedir = 'data/nhpdat/spont/A1/oscoutnew/'
if len(sys.argv) > 1: batchty = int(sys.argv[1])
if len(sys.argv) > 2: useMUA = int(sys.argv[2])
if len(sys.argv) > 3: outbasedir = sys.argv[3]
if batchty == 0:
print('eventbatch',batchty,useMUA,outbasedir)
eventbatch(useMUA=useMUA,outbasedir=outbasedir)
elif batchty == 1:
print('laggedcohbatch')
laggedcohbatch()
elif batchty == 2:
print('laggedcohnobandbatch')
laggedcohnobandbatch()
elif batchty == 3:
print('simbatch')
simbatch()
|
load.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import struct
import threading
from ._teradatapt import EncoderError, InvalidCredentialsError, MLoad, TeradataError as TeradataPTError
from .constants import *
from .errors import *
from .cmd import TeradataCmd
from .connection import Connection, Context
from .encoders import DateHandler, null_handler
from .fmt import format_table
from .io import ArchiveFileReader, CSVReader, FileReader, JSONReader, Reader
from .logging import log
from .utils import get_version_info, pipeline, suppress_context
from ._compat import *
__all__ = ['BulkLoad']
error_table_count = """
select a.errorcode as code, a.errorfield as field, b.errortext as text, count(*) over (partition by a.errorcode, a.errorfield) as errcount
from {0}_e1 a
join dbc.errormsgs b
on a.errorcode = b.errorcode
group by 1,2,3;
"""
error_table_sample = """
select
a.errorcode as code,
a.errorfield as field,
min(substr(a.hostdata, 0, 30000)) as hostdata
from {0}_e1 a
qualify row_number() over (partition by a.errorcode, a.errorfield order by a.errorcode asc)=1
group by 1,2;
"""
class TeradataBulkLoad(Connection):
"""
The class for using the TPT API's UPDATE (MLoad) driver to insert a
large (> ~100k rows) amount of data into an existing Teradata table.
Exposed under the alias :class:`giraffez.BulkLoad`.
:param str table: The name of the target table for loading.
:param str host: Omit to read from :code:`~/.girafferc` configuration file.
:param str username: Omit to read from :code:`~/.girafferc` configuration file.
:param str password: Omit to read from :code:`~/.girafferc` configuration file.
:param int log_level: Specify the desired level of output from the job.
Possible values are :code:`giraffez.SILENCE`, :code:`giraffez.INFO` (default),
:code:`giraffez.VERBOSE`, and :code:`giraffez.DEBUG`
:param str config: Specify an alternate configuration file to be read from,
when the previous parameters are omitted.
:param str key_file: Specify an alternate key file to use for configuration decryption
:param string dsn: Specify a connection name from the configuration file to be
used, in place of the default.
:param bool protect: If authentication with Teradata fails and :code:`protect` is :code:`True`,
locks the connection used in the configuration file. This can be unlocked using the
command :code:`giraffez config --unlock <connection>`, changing the connection password,
or via the :meth:`~giraffez.config.Config.unlock_connection` method.
:param bool coerce_floats: Coerce Teradata decimal types into Python floats
:param bool cleanup: Attempt to cleanup all work tables
when context exits.
:param bool print_error_table: Prints a user-friendly version of the mload
error table to stderr.
:raises `giraffez.errors.InvalidCredentialsError`: if the supplied credentials are incorrect
:raises `giraffez.TeradataPTError`: if the connection cannot be established
If the target table is currently under an MLoad lock (such as if the
previous operation failed), a :code:`release mload` statement will be
executed on the table, and the load job will be re-attempted.
Meant to be used, where possible, with python's :code:`with` context handler
to guarantee that connections will be closed gracefully when the operation
is complete.
"""
checkpoint_interval = 50000
def __init__(self, table=None, host=None, username=None, password=None,
log_level=INFO, config=None, key_file=None, dsn=None, protect=False,
coerce_floats=False, cleanup=False, print_error_table=False):
super(TeradataBulkLoad, self).__init__(host, username, password, log_level, config, key_file,
dsn, protect)
# Attributes used with property getter/setters
self._columns = None
self._table_name = None
self.initiated = False
self.finished = False
self.coerce_floats = coerce_floats
self.perform_cleanup = cleanup
self.exit_code = None
self.applied_count = 0
self.error_count = 0
#: The amount of time spent in idle (waiting for server)
self.idle_time = 0
#: Prints the error table when there is an issue, good for troubleshooting jobs
self.print_error_table = print_error_table
self.preprocessor = lambda s: s
if table is not None:
self.table = table
def checkpoint(self):
"""
Execute a checkpoint while loading rows. Called automatically
when loading from a file. Updates the exit code of the driver to
reflect errors.
"""
return self.mload.checkpoint()
def cleanup(self):
"""
Drops any existing work tables, as returned by
:meth:`~giraffez.load.TeradataBulkLoad.tables`.
        :raises `giraffez.TeradataPTError`: if a Teradata error occurred
"""
threads = []
for i, table in enumerate(filter(lambda x: self.mload.exists(x), self.tables)):
log.info("BulkLoad", "Dropping table '{}'...".format(table))
t = threading.Thread(target=self.mload.drop_table, args=(table,))
threads.append(t)
t.start()
for t in threads:
t.join()
@property
def columns(self):
"""
The list of columns in use.
:getter: Return the list of columns in use.
:setter: Set the columns to be loaded into, as well as their order. If
loading from a file, these will be determined from the file header.
Not necessary if you are loading into all columns, in the original
order. The value must be a :code:`list` of names in the order that
the fields of data will be presented in each row.
Raises :class:`~giraffez.errors.GiraffeError` if :code:`field_names`
is not a :code:`list`.
Raises :class:`~giraffez.errors.GiraffeError` if the target table
has not been set.
:type: :class:`~giraffez.types.Columns`
"""
return self._columns
@columns.setter
def columns(self, field_names):
if not isinstance(field_names, list):
raise GiraffeError("Must set .columns property as type <List>")
fields = []
for field in field_names:
field = field.lower()
if field in fields:
raise GiraffeError("Cannot set duplicate column: '{}'".format(field))
fields.append(field)
self._columns = fields
def finish(self):
"""
Finishes the load job. Called automatically when the connection closes.
:return: The exit code returned when applying rows to the table
"""
if self.finished:
return self.exit_code
checkpoint_status = self.checkpoint()
self.exit_code = self._exit_code()
if self.exit_code != 0:
raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
# TODO(chris): should this happen every time?
if self.applied_count > 0:
self._end_acquisition()
self._apply_rows()
self.exit_code = self._exit_code()
if self.exit_code != 0:
raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
self.finished = True
return self.exit_code
def from_file(self, filename, table=None, delimiter='|', null='NULL',
panic=True, quotechar='"', parse_dates=False):
"""
Load from a file into the target table, handling each step of the
load process.
Can load from text files, and properly formatted giraffez archive
files. In both cases, if Gzip compression is detected the file will be
decompressed while reading and handled appropriately. The encoding is
determined automatically by the contents of the file.
It is not necessary to set the columns in use prior to loading from a file.
In the case of a text file, the header is used to determine column names
and their order. Valid delimiters include '|', ',', and '\\t' (tab). When
loading an archive file, the column information is decoded alongside the data.
:param str filename: The location of the file to be loaded
:param str table: The name of the target table, if it was not specified
            to the constructor for the instance
:param str null: The string that indicates a null value in the rows being
inserted from a file. Defaults to 'NULL'
        :param str delimiter: When loading a file, indicates that fields are
            separated by this delimiter. Defaults to :code:`'|'`. If set to
            :code:`None`, the delimiter is determined from the header of the
            file. In most cases, the default behavior is sufficient
:param str quotechar: The character used to quote fields containing special characters,
like the delimiter.
:param bool panic: If :code:`True`, when an error is encountered it will be
raised. Otherwise, the error will be logged and :code:`self.error_count`
is incremented.
:return: The output of the call to
:meth:`~giraffez.load.TeradataBulkLoad.finish`
:raises `giraffez.errors.GiraffeError`: if table was not set and :code:`table`
            is :code:`None`, or if a Teradata error occurred while retrieving table info.
:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
are format errors in the row values.
"""
if not self.table:
if not table:
raise GiraffeError("Table must be set or specified to load a file.")
self.table = table
if not isinstance(null, basestring):
raise GiraffeError("Expected 'null' to be str, received {}".format(type(null)))
with Reader(filename, delimiter=delimiter, quotechar=quotechar) as f:
if not isinstance(f.delimiter, basestring):
raise GiraffeError("Expected 'delimiter' to be str, received {}".format(type(delimiter)))
self.columns = f.header
if isinstance(f, ArchiveFileReader):
self.mload.set_encoding(ROW_ENCODING_RAW)
self.preprocessor = lambda s: s
if parse_dates:
self.preprocessor = DateHandler(self.columns)
self._initiate()
self.mload.set_null(null)
self.mload.set_delimiter(delimiter)
i = 0
for i, line in enumerate(f, 1):
self.put(line, panic=panic)
if i % self.checkpoint_interval == 1:
log.info("\rBulkLoad", "Processed {} rows".format(i), console=True)
checkpoint_status = self.checkpoint()
self.exit_code = self._exit_code()
if self.exit_code != 0:
return self.exit_code
log.info("\rBulkLoad", "Processed {} rows".format(i))
return self.finish()
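    # Illustrative call for the method above (file name and options are
    # hypothetical): load a pipe-delimited file with a header row into the
    # target table and return the final exit code.
    #
    #     with giraffez.BulkLoad("database.some_table") as load:
    #         exit_code = load.from_file("some_rows.txt", delimiter="|", null="NULL")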
def put(self, items, panic=True):
"""
Load a single row into the target table.
:param list items: A list of values in the row corresponding to the
fields specified by :code:`self.columns`
:param bool panic: If :code:`True`, when an error is encountered it will be
raised. Otherwise, the error will be logged and :code:`self.error_count`
is incremented.
:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
are format errors in the row values.
:raises `giraffez.errors.GiraffeError`: if table name is not set.
:raises `giraffez.TeradataPTError`: if there is a problem
connecting to Teradata.
"""
if not self.initiated:
self._initiate()
try:
row_status = self.mload.put_row(self.preprocessor(items))
self.applied_count += 1
except (TeradataPTError, EncoderError) as error:
self.error_count += 1
if panic:
raise error
log.info("BulkLoad", error)
def read_error_table(self):
with TeradataCmd(log_level=log.level, config=self.config, key_file=self.key_file,
dsn=self.dsn, silent=True) as cmd:
result = list(cmd.execute((error_table_count + error_table_sample).format(self.table)))
if len(result) == 0:
log.info("No error information available.")
return
return result
def release(self):
"""
Attempt release of target mload table.
:raises `giraffez.errors.GiraffeError`: if table was not set by
the constructor, the :code:`TeradataBulkLoad.table`, or
:meth:`~giraffez.load.TeradataBulkLoad.from_file`.
"""
if self.table is None:
raise GiraffeError("Cannot release. Target table has not been set.")
log.info("BulkLoad", "Attempting release for table {}".format(self.table))
self.mload.release(self.table)
@property
def status(self):
return self.mload.status()
@property
def tables(self):
"""
The names of the work tables used for loading.
:return: A list of four tables, each the name of the target table
with the added suffixes, "_wt", "_log", "_e1", and "_e2"
:raises `giraffez.errors.GiraffeError`: if table was not set by
the constructor, the :code:`TeradataBulkLoad.table`, or
:meth:`~giraffez.load.TeradataBulkLoad.from_file`.
"""
if self.table is None:
raise GiraffeError("Target table has not been set.")
return [
"{}_wt".format(self.table),
"{}_log".format(self.table),
"{}_e1".format(self.table),
"{}_e2".format(self.table),
]
@property
def table(self):
"""
The name of the target table.
:getter: Returns the name of the target table, or :code:`None` if it
has not been set.
:setter: Set the name of the target table, if the table name
was not given to the constructor of the
:class:`~giraffez.load.TeradataBulkLoad` instance or
:meth:`~giraffez.load.TeradataBulkLoad.from_file`. The value given
must include all qualifiers such as database name.
Raises :class:`~giraffez.errors.GiraffeError` if the MLoad connection has
already been initiated, or the :class:`~giraffez.cmd.TeradataCmd` connection cannot
be established.
Raises :class:`~giraffez.TeradataPTError` if the column data could not be
retrieved from Teradata
:type: str
"""
return self._table_name
@table.setter
def table(self, table_name):
if self.initiated:
raise GiraffeError("Cannot reuse BulkLoad context for more than one table")
self._table_name = table_name
@property
def total_count(self):
"""
The number of rows applied, plus the number of rows in error.
"""
return self.applied_count + self.error_count
def _apply_rows(self):
log.info("BulkLoad", "Beginning apply phase ...")
self.mload.apply_rows()
self._update_apply_count()
self._update_error_count()
log.info("BulkLoad", "Apply phase ended.")
def _close(self, exc=None):
if not self.initiated:
return
if not exc:
try:
self.finish()
except TeradataPTError as error:
self._close(error)
if self.print_error_table:
for row in self.read_error_table():
print(row)
raise error
log.info("BulkLoad", "Closing Teradata PT connection ...")
self.mload.close()
log.info("BulkLoad", "Teradata PT request complete.")
def _connect(self, host, username, password, logon_mech, logon_mech_data):
self.mload = MLoad(host, username, password, logon_mech, logon_mech_data)
title, version = get_version_info()
query_band = "UTILITYNAME={};VERSION={};".format(title, version)
self.mload.add_attribute(TD_QUERY_BAND_SESS_INFO, query_band)
def _end_acquisition(self):
log.info("BulkLoad", "Ending acquisition phase ...")
self.mload.end_acquisition()
log.info("BulkLoad", "Acquisition phase ended.")
def _exit_code(self):
data = self.mload.get_event(TD_Evt_ExitCode)
if data is None:
log.info("BulkLoad", "Update exit code failed.")
return
return struct.unpack("h", data)[0]
def _initiate(self):
if not self.table:
raise GiraffeError("Table must be set prior to initiating.")
if self.initiated:
raise GiraffeError("Already initiated connection.")
try:
if self.perform_cleanup:
self.cleanup()
elif any(filter(lambda x: self.mload.exists(x), self.tables)):
raise GiraffeError("Cannot continue without dropping previous job tables. Exiting ...")
log.info("BulkLoad", "Initiating Teradata PT request (awaiting server) ...")
start_time = time.time()
self.mload.initiate(self.table, self.columns)
self.idle_time = time.time() - start_time
        except InvalidCredentialsError as error:
            # Lock the configured connection on bad credentials; assumes the
            # base Connection keeps the protect/config/dsn/key_file arguments
            # it was constructed with
            if self.protect:
                Config.lock_connection(self.config, self.dsn, self.key_file)
            raise error
self.mload.set_null(None)
log.info("BulkLoad", "Teradata PT request accepted.")
self._columns = self.mload.columns()
self.initiated = True
def _update_apply_count(self):
data = self.mload.get_event(TD_Evt_RowCounts64)
if data is None:
log.info("BulkLoad", "Update apply row count failed.")
return
recv, sent, applied = struct.unpack("QQQ", data)
log.debug("Debug[2]", "Event[RowCounts64]: r:{}, s:{}, a:{}".format(recv, sent, applied))
self.applied_count = applied
def _update_error_count(self):
data = self.mload.get_event(TD_Evt_ErrorTable2, 1)
if data is None:
log.info("BulkLoad", "Update error row count failed.")
return
count = struct.unpack("I", data)[0]
log.debug("Debug[2]", "Event[ErrorTable2]: c:{}".format(count))
self.error_count = count
class BulkLoad(Context):
__instance__ = TeradataBulkLoad
|
PC_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.6.1)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import environ as osenviron
from platform import machine as osprocessor
from os import path, system
from os import system as ossystem
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock
from random import choice
import pip
import select
thread_lock = Lock()
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed - "
+ "Xxhash support will be disabled")
xxhash_enabled = False
# Global variables
MINER_VER = "2.61" # Version number
NODE_ADDRESS = "server.duinocoin.com"
AVAILABLE_PORTS = [
2813, # PC (1)
2814, # PC (2)
2815, # PC (3)
2812, # Wallets, other miners
2811 # Legacy
]
SOC_TIMEOUT = 45 # Socket timeout
PERIODIC_REPORT_TIME = 60
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
config = ConfigParser()
donation_level = 0
totalhashrate_mean, thread = [], []
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["Duino-Coin-PC-Miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except:
lang = "english"
def get_string(string_name):
    # Get string from the language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debug_output(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# disable window title setter when running with nohup
if osenviron.get('_') != '/usr/bin/nohup':
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
pretty_print(
"sys0",
get_string("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ get_string("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
def calculate_uptime(start_time):
    # Check the largest time unit first so the hour branches are reachable
    uptime = time() - start_time
    if uptime <= 59:
        return str(round(uptime)) + get_string("uptime_seconds")
    elif uptime >= 7200:
        return str(round(uptime // 3600)) + get_string("uptime_hours")
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + get_string("uptime_hour")
    elif uptime >= 120:
        return str(round(uptime // 60)) + get_string("uptime_minutes")
    else:
        return str(round(uptime // 60)) + get_string("uptime_minute")
def get_prefix(diff: int):
if diff >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif diff >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif diff >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
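# Quick illustration of get_prefix (values chosen for clarity):
#   get_prefix(8000)    -> "8k"
#   get_prefix(1200000) -> "1M"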
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = get_string("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = get_string("medium_diff_short")
else:
diffName = get_string("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string("greeting_morning")
elif current_hour == 12:
greeting = get_string("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = get_string("greeting_afternoon")
elif current_hour >= 18:
greeting = get_string("greeting_evening")
else:
greeting = get_string("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ get_string("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.YELLOW
+ get_string("translation_autor"))
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debug_output("Error displaying CPU message: " + str(e))
# if osname == "nt" or osname == "posix":
# print(
# Style.DIM
# + Fore.YELLOW
# + " ‖ "
# + Style.NORMAL
# + Fore.RESET
# + get_string("donation_level")
# + Style.BRIGHT
# + Fore.YELLOW
# + str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ get_string("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " ⚙ "
+ diffName)
if rig_identiier != "None":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ get_string("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identiier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
global rig_identiier
global lang
global algorithm
global SOC_TIMEOUT
global discord_presence
global PERIODIC_REPORT_TIME
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ get_string("basic_config_tool")
+ RESOURCES_DIR
+ get_string("edit_config_file_warning"))
print(
Style.RESET_ALL
+ get_string("dont_have_account")
+ Fore.YELLOW
+ get_string("wallet")
+ Fore.RESET
+ get_string("register_warning"))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_username")
+ Fore.RESET
+ Style.BRIGHT)
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ get_string("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_intensity")
+ Fore.RESET
+ Style.BRIGHT)
threadcount = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_threads")
+ str(cpu_count())
+ "): "
+ Fore.RESET
+ Style.BRIGHT)
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - "
+ get_string("low_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - "
+ get_string("medium_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "3"
+ Style.NORMAL
+ " - "
+ get_string("net_diff"))
requested_diff = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_difficulty")
+ Fore.RESET
+ Style.BRIGHT)
rig_identiier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_rig_identifier")
+ Fore.RESET
+ Style.BRIGHT)
if rig_identiier == "y" or rig_identiier == "Y":
rig_identiier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string("ask_rig_name")
+ Fore.RESET
+ Style.BRIGHT)
else:
rig_identiier = "None"
donation_level = "0"
#if osname == "nt" or osname == "posix":
# donation_level = input(
# Style.RESET_ALL
# + Fore.YELLOW
# + get_string("ask_donation_level")
# + Fore.RESET
# + Style.BRIGHT)
# Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
# Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(8):
threadcount = 8
print(
Style.RESET_ALL
+ Style.BRIGHT
+ get_string("max_threads_notice"))
elif int(threadcount) < int(1):
threadcount = 1
# Check whether algo setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
# Check whether diff setting is correct
if requested_diff == "1":
requested_diff = "LOW"
elif requested_diff == "2":
requested_diff = "MEDIUM"
else:
requested_diff = "MEDIUM"
# Check whether donation_level is correct
donation_level = sub(r"\D", "", donation_level)
if donation_level == "":
donation_level = 1
elif float(donation_level) > int(5):
donation_level = 5
elif float(donation_level) < int(0):
donation_level = 0
# Format data
config["Duino-Coin-PC-Miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": 45,
"periodic_report": 60,
"discord_presence": "y"
}
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["Duino-Coin-PC-Miner"]["username"]
efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
threadcount = config["Duino-Coin-PC-Miner"]["threads"]
requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
donation_level = config["Duino-Coin-PC-Miner"]["donate"]
algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
debug = config["Duino-Coin-PC-Miner"]["debug"]
SOC_TIMEOUT = int(config["Duino-Coin-PC-Miner"]["soc_timeout"])
discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-PC-Miner"]["periodic_report"])
efficiency = (100 - float(efficiency)) * 0.01
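# Example of the Miner_config.cfg file written above (all values are
# illustrative; the username is a placeholder):
#
#     [Duino-Coin-PC-Miner]
#     username = my_wallet
#     efficiency = 95
#     threads = 4
#     requested_diff = MEDIUM
#     donate = 0
#     identifier = None
#     algorithm = DUCO-S1
#     language = english
#     debug = n
#     soc_timeout = 45
#     periodic_report = 60
#     discord_presence = y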
def ducos1(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
    # Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
        # If efficiency is lower than 100%, sleep to use less CPU
if (ducos1res % 1000 == 0
and float(100 - efficiency * 100) < 100):
sleep((efficiency)/500)
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
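# Minimal self-check of the DUCO-S1 search above (all values are made up):
# a job is (last_hash, expected_hash, difficulty) and the miner looks for the
# nonce whose sha1(last_hash + nonce) matches expected_hash.
#
#     last = "b1f37fdc7a4c48d36ec4aa6564d3c2e9"
#     nonce = 42
#     expected = sha1((last + str(nonce)).encode("ascii")).hexdigest()
#     assert ducos1(last, expected, 1, 0)[0] == nonce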
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# XXHASH algorithm
# Measure starting time
timeStart = time()
    # Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
        # If efficiency is lower than 100%, sleep to use less CPU
if (ducos1xxres % 1000 == 0
and float(100 - efficiency * 100) < 100):
sleep((efficiency)/500)
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid: int,
accepted: int,
rejected: int,
requested_diff: str,
khashcount: int,
username: str,
efficiency: int,
rig_identiier: str,
algorithm: str,
hashrates_dict,
totalhashrate_mean,
NODE_ADDRESS: str,
NODE_PORT: int):
# Mining section for every thread
start_time = time()
report_shares, totalhashrate = 0, 0
while True:
while True:
try:
retry_counter = 0
while True:
try:
if socket():
socket().close()
if retry_counter >= 3:
debug_output(
'Error connecting after 3 retries, '
+ 'fetching new node IP')
NODE_ADDRESS, NODE_PORT = fetch_pools()
debug_output('Connecting to node ' +
str(NODE_ADDRESS) + ":" + str(NODE_PORT))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if server_version:
break
except Exception as e:
retry_counter += 1
pretty_print("net0",
" Error connecting to mining node: "
+ str(e)
+ ", retrying in 5s",
"error")
sleep(5)
if threadid == 0:
soc.send(bytes("MOTD", encoding="utf8"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
if threadid == 0:
if float(server_version) <= float(MINER_VER):
# Miner is up-to-date
pretty_print(
"net"
+ str(threadid),
get_string("connected")
+ Fore.RESET
+ Style.NORMAL
+ get_string("connected_server")
+ str(server_version)
+ ", node: "
+ str(NODE_ADDRESS)
+ ":"
+ str(NODE_PORT)
+ ")",
"success")
else:
# Miner is outdated
pretty_print(
"sys"
+ str(threadid),
get_string("outdated_miner")
+ MINER_VER
+ ") -"
+ get_string("server_is_on_version")
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ get_string("update_warning"),
"warning")
sleep(5)
break
except Exception as e:
# Socket connection error
pretty_print(
"net"
+ str(threadid),
get_string("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debug_output("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = get_string("using_algo_xxh")
else:
using_algo = get_string("using_algo")
pretty_print(
"sys"
+ str(threadid),
get_string("mining_thread")
+ str(threadid)
+ get_string("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ get_string("efficiency"),
"success")
# Mining section
while True:
try:
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Thread "
+ str(threadid)
+ ": Received: " + str(job))
try:
diff = int(job[2])
debug_output("Thread "
+ str(threadid)
+ ": Correct job received")
break
except:
pretty_print("cpu" + str(threadid),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff, efficiency)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff, efficiency)
computetimeStop = time()
computetime = computetimeStop - computetimeStart
debug_output("Thread "
+ str(threadid)
+ ": result found: "
+ str(result[0]))
# Convert to kH/s
threadhashcount = result[1] / 1000
try:
# Add this thread to the global hashrate counter
hashrates_dict[threadid] = threadhashcount
                        # Calculate total hashrate of all threads
sharehashrate = 0
for thread in hashrates_dict.keys():
sharehashrate += hashrates_dict[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
except Exception:
totalhashrate = threadhashcount
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(result[0])
+ ","
+ str(result[1])
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
+ str(rig_identiier),
encoding="utf8"))
                        responsetimestart = now()
feedback = soc.recv(64).decode().rstrip("\n")
responsetimestop = now()
ping = int((
                            responsetimestop - responsetimestart
).microseconds / 1000)
debug_output("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
elif totalhashrate > 100:
# Format for >100 kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
else:
# Format for small hashrates
formattedhashcount = str(
"%02.1f" % float(totalhashrate)
+ " kH/s")
diff = get_prefix(diff)
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
get_string("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ get_string("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ⛏"
+ get_string("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
get_string("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ get_string("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ⛏"
+ get_string("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
get_string("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ get_string("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ get_string("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = accepted.value - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
totalhashrate,
uptime)
start_time = time()
break
break
except Exception as e:
pretty_print(
"net"
+ str(threadid),
get_string("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debug_output("Error while mining: " + str(e))
sleep(5)
break
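# Sketch of the text protocol used by the mining loop above (messages are
# illustrative, not an authoritative specification):
#
#     -> "JOB,<username>,<requested_diff>"
#     <- "<last_hash>,<expected_hash>,<difficulty>"
#     -> "<nonce>,<hashrate>,Official PC Miner (<algo>) v<ver>,<rig_id>"
#     <- "GOOD" | "BLOCK" | anything else is counted as a rejected share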
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" "
+ get_string('periodic_mining_report')
+ Fore.RESET
+ Style.NORMAL
+ get_string('report_period')
+ str(seconds)
+ get_string('report_time')
+ get_string('report_body1')
+ str(shares)
+ get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3')
+ get_string('report_body4')
+ str(int(hashrate)) + " H/s"
+ get_string('report_body5')
+ str(int(hashrate*seconds))
+ get_string('report_body6')
+ get_string('total_mining_time')
+ str(uptime), "success")
def pretty_print(message_type, message, state):
# Prints colored output messages
# Usb/net/sys background
if message_type.startswith("net"):
background = Back.BLUE
elif message_type.startswith("cpu"):
background = Back.YELLOW
if message_type.startswith("sys"):
background = Back.GREEN
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ message_type
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debug_output("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
# Update rich presence status
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=mining_start_time,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}
]
)
debug_output("Rich presence updated")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
def get_fastest_connection(server_ip: str):
connection_pool = []
available_connections = []
for i in range(len(AVAILABLE_PORTS)):
connection_pool.append(socket())
connection_pool[i].setblocking(0)
try:
connection_pool[i].connect((server_ip,
AVAILABLE_PORTS[i]))
connection_pool[i].settimeout(SOC_TIMEOUT)
except BlockingIOError as e:
pass
ready_connections, _, __ = select.select(connection_pool, [], [])
while True:
for connection in ready_connections:
try:
server_version = connection.recv(5).decode()
except:
continue
            if not server_version:
continue
available_connections.append(connection)
connection.send(b'PING')
ready_connections, _, __ = select.select(available_connections, [], [])
ready_connections[0].recv(4)
return ready_connections[0].getpeername()[1]
def fetch_pools():
while True:
pretty_print("net0",
" "
+ get_string("connection_search")
+ "...",
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool"
).json()
pretty_print("net0",
get_string('connecting_node')
+ Fore.RESET
+ Style.NORMAL
+ str(response["name"]),
"success")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return NODE_ADDRESS, NODE_PORT
except Exception as e:
pretty_print("net0",
" Error retrieving mining node: "
+ str(e)
+ ", retrying in 15s",
"error")
sleep(15)
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
title(get_string("duco_python_miner") + str(MINER_VER) + ")")
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True)
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_dict = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
pretty_print(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 10s.",
"error")
sleep(10)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debug_output("Config file loaded")
except Exception as e:
pretty_print(
"sys0",
get_string("load_config_error")
+ RESOURCES_DIR
+ get_string("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debug_output("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debug_output("Greeting displayed")
except Exception as e:
pretty_print(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debug_output("Error displaying greeting message: " + str(e))
try:
NODE_ADDRESS, NODE_PORT = fetch_pools()
except:
NODE_ADDRESS = "server.duinocoin.com"
NODE_PORT = 2813
debug_output("Using default server port and address")
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_dict,
totalhashrate_mean,
NODE_ADDRESS,
NODE_PORT))
thread[x].start()
if x > 4 and x % 4 == 0:
                # Don't launch a burst of threads
sleep(5)
else:
sleep(0.1)
except Exception as e:
pretty_print(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debug_output("Error launching CPU thead(s): " + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debug_output("Error launching Discord RPC thead: " + str(e))
|
test_forward.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Tensorflow testcases
====================
This test script exercises TensorFlow operators through the Relay frontend and
compares the results against TensorFlow.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
tf_dtypes = {
"float32": tf.float32,
"float16": tf.float16,
"float64": tf.float64,
"int32": tf.int32,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"uint16": tf.uint16,
"int64": tf.int64,
}
def vmobj_to_list(o):
if isinstance(o, tvm.nd.NDArray):
return [o.asnumpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].asnumpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
graph_def,
input_data,
input_node,
num_output=1,
target="llvm",
out_names=None,
opt_level=3,
mode="graph_runtime",
cuda_layout="NCHW",
layout=None,
disabled_pass=None,
ignore_in_shape=False,
serialize=False,
):
""" Generic function to compile on relay and execute on tvm """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
if target == "cuda":
layout = cuda_layout
target_host = None
if ignore_in_shape:
shape_dict = None
else:
shape_dict = {
e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
}
mod, params = relay.frontend.from_tensorflow(
graph_def, layout=layout, shape=shape_dict, outputs=out_names
)
ctx = tvm.context(target, 0)
if mode == "debug":
ex = relay.create_executor(mode, mod=mod, ctx=tvm.cpu(), target="llvm")
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
# Interpreter doesn't bind constants, so still need to find in params
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = ex.evaluate()(*inputs)
return vmobj_to_list(result)
elif mode == "vm":
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
print(mod["main"])
mod = relay.transform.InferType()(mod)
vm_exec = relay.vm.compile(mod, target="llvm", params=params)
if serialize:
code, lib = vm_exec.save()
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
vm = VirtualMachine(vm_exec, tvm.cpu())
inputs = {}
for e, i in zip(input_node, input_data):
inputs[e] = tvm.nd.array(i)
result = vm.invoke("main", **inputs)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
graph, lib, params = relay.build(mod, target, target_host, params)
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
# set inputs
for e, i in zip(input_node, input_data):
m.set_input(e, tvm.nd.array(i))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(
out_names
), "out_names: {} num_output: {}".format(out_names, num_output)
tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)]
return tvm_output_list
def run_tf_graph(sess, input_data, input_node, output_node):
""" Generic function to execute tensorflow """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
output_data = sess.run(tensor, input_dict)
return output_data
def compare_tf_with_tvm(
in_data,
in_name,
out_name,
init_global_variables=False,
no_gpu=False,
opt_level=3,
mode="graph_runtime",
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
):
"""Generic function to generate and compare tensorflow and TVM output"""
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
in_node = [name_without_num(name) for name in in_name]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
final_graph_def = (
tf_testing.AddShapesToGraphDef(sess, out_node)
if add_shapes_to_graph_def
else tf.get_default_graph().as_graph_def()
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
for device in ["llvm", "cuda"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
if no_gpu and device == "cuda":
continue
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=device,
out_names=out_name,
num_output=len(out_name),
opt_level=opt_level,
mode=mode,
cuda_layout=cuda_layout,
)
            # Since the output names from the TensorFlow and Relay runs do not
            # match exactly, only the first len(tf_output) outputs are compared
for i in range(len(tf_output)):
if not isinstance(tf_output[i], np.ndarray):
assert len(tvm_output[i].shape) == 0
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
sess.close()
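# Typical use of the helper above in the tests below (a minimal sketch; the
# placeholder shape and ReLU op are arbitrary): build a small TF graph, then
# hand the input data plus input/output tensor names to compare_tf_with_tvm.
#
#     with tf.Graph().as_default():
#         in_data = array_ops.placeholder(shape=[1, 4], dtype="float32")
#         nn_ops.relu(in_data)
#         compare_tf_with_tvm(
#             np.random.uniform(size=(1, 4)).astype("float32"),
#             "Placeholder:0", "Relu:0")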
def is_gpu_available():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"]
if len(gpu_list) > 0:
print("Tensorflow GPU:", gpu_list)
return True
else:
return False
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
""" One iteration of pool operation with given shapes and attributes """
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
if is_gpu_available():
if len(input_shape) == 4:
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
kwargs["data_format"] = "NCHW"
_test_pooling_iteration(input_shape, **kwargs)
@tvm.testing.uses_gpu
def test_forward_pooling():
""" Pooling """
# TensorFlow only supports NDHWC for max_pool3d on CPU
for pool_type in ["AVG", "MAX"]:
# NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
# test cases for max_pool3d & avg_pool3d with layout NCDHW
# TensorFlow pool3d doesn't support NCDHW on cpu
if is_gpu_available():
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
data_format="NCDHW",
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
data_format="NCDHW",
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
# Tests involving SpaceToBatchND
_test_pooling(
input_shape=[1, 1, 2, 1],
window_shape=[1, 1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 2],
)
_test_pooling(
input_shape=[1, 2, 1],
window_shape=[1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[2],
)
#######################################################################
# Convolution
# -----------
def _test_convolution(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
    # Initialize the input tensor with an array of incrementing
    # numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv2d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv2D:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
elif opname == "conv_transpose":
nn_ops.conv2d_transpose(
in_data,
in_filter,
output_shape=deconv_output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"conv2d_transpose:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
else:
nn_ops.depthwise_conv2d_native(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"DepthwiseConv2dNative:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution():
if is_gpu_available():
_test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution(
"depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 17, 17],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 16, 16],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 19, 8, 8],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NCHW",
[1, 1, 8, 8],
)
_test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"conv",
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"depthwise",
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 16, 16, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 19],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 12],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 19],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 12],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 8, 8, 19],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NHWC",
[1, 8, 8, 1],
)
# Test without adding shapes to graph def
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
add_shapes_to_graph_def=False,
)
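# Note on the conv_transpose cases above: with "SAME" padding and stride 2, both a
# 15x15 and a 16x16 output are legal requests for an 8x8 input, because the forward
# convolution maps either size back to 8 (ceil(15 / 2) == ceil(16 / 2) == 8), which
# is why the same input appears with two different deconv_output_shape values.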
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NDHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv3d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv3D:0",
cuda_layout="NCDHW",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d():
if is_gpu_available():
_test_convolution3d(
"conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
# Test without adding shapes to graph def
_test_convolution3d(
"conv",
[4, 17, 17, 17, 12],
[3, 3, 3, 12, 32],
[1, 1, 1],
[2, 2, 2],
"VALID",
"NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
data_shape,
filter_shape,
strides,
padding,
output_shape,
data_format="NCDHW",
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution transpose with given shapes and attributes """
dtype = "float32"
data_array = np.random.uniform(size=data_shape).astype(dtype)
filter_array = np.random.uniform(size=filter_shape).astype(dtype)
if data_format == "NDHWC":
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data_shape, dtype=dtype)
in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype)
nn_ops.conv3d_transpose(
in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
data_array,
"Placeholder:0",
"conv3d_transpose:0",
cuda_layout="NDHWC",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
if is_gpu_available():
_test_convolution3d_transpose(
data_shape=[1, 10, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[4, 9, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[1, 3, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 6, 15, 15, 15],
)
_test_convolution3d_transpose(
data_shape=[1, 16, 8, 8, 8],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 6, 24, 24, 24],
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 10],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[4, 8, 8, 8, 9],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 3],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 15, 15, 15, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
)
# Test without adding shapes to graph def
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
""" One iteration of biasadd with given shapes and attributes """
total_size_1 = 1
for s in tensor_in_sizes:
total_size_1 *= s
tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
total_size_2 = tensor_bias_sizes[0]
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
nn_ops.bias_add(in_data, in_bias, data_format=data_format)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
)
@tvm.testing.uses_gpu
def test_forward_biasadd():
if is_gpu_available():
_test_biasadd([4, 176, 8, 8], "NCHW")
_test_biasadd([1, 100, 1, 1], "NCHW")
_test_biasadd([4, 19, 17, 17], "NCHW")
_test_biasadd([4, 124, 3, 3], "NCHW")
_test_biasadd([4, 8, 8, 176], "NHWC")
_test_biasadd([1, 1, 1, 100], "NHWC")
_test_biasadd([4, 17, 17, 19], "NHWC")
_test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
with tf.Graph().as_default():
dtype = tf.float32
t = tf.constant(
np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)
)
out = tf.where(t)
compare_tf_with_tvm([], [], out.name, mode="debug")
compare_tf_with_tvm([], [], out.name, mode="vm")
def test_forward_argwhere():
_test_forward_where((5,))
_test_forward_where((5, 5))
_test_forward_where((5, 5, 5))
_test_forward_where((5, 5, 5, 5))
_test_forward_where((5, 5, 5, 5, 5))
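# tf.where with a single argument returns the indices of the non-zero elements, one
# row per hit, so it behaves like np.argwhere.  For example (values for illustration
# only): np.argwhere([[0, 1], [2, 0]]) yields [[0, 1], [1, 0]].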
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
const1 = tf.constant(padding_np, dtype=tf.int32)
        # make paddings the output of a tf op (tf.reverse) rather than a graph input,
        # so it can be extracted with infer_value_simulated
paddings = tf.reverse(const1, axis=[-1])
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
_test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
_test_space_to_batch_nd(
input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
)
_test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
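# A minimal pure-numpy sketch of what SpaceToBatchND computes for the 4-D,
# zero-padding cases above (assumes `np` is the module-level numpy import used
# throughout this file; the helper is illustrative only and not called by the tests).
def _space_to_batch_2d_reference(x, block_h, block_w):
    """Move each block_h x block_w spatial block of an NHWC array into the batch axis."""
    n, h, w, c = x.shape
    y = x.reshape(n, h // block_h, block_h, w // block_w, block_w, c)
    # block offsets come first, so the new batch index is (offset_h * block_w + offset_w) * n + b
    y = y.transpose(2, 4, 0, 1, 3, 5)
    return y.reshape(n * block_h * block_w, h // block_h, w // block_w, c)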
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.batch_to_space_nd(in_data, block_shape, crops)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(
input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
_test_batch_to_space_nd(
input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
)
_test_batch_to_space_nd(
input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
)
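# BatchToSpaceND is the inverse rearrangement: with crops equal to the paddings used
# by SpaceToBatchND it restores the original tensor, e.g. a [4, 1, 1, 1] input with
# block_shape [2, 2] and zero crops comes back as [1, 2, 2, 1].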
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
""" One iteration of reshape operation with given data and out shape """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
""" relay.expr.Call as shape """
data = np.zeros((6, 4, 2))
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out_shape = tf.constant([1, 2, 3], dtype="int32")
out_shape = tf.multiply(out_shape, 2)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
""" A special case for reshape. """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
out_shape = array_ops.shape(in_shape_like)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
newshape = tf.add(a, b)
out = array_ops.reshape(in_data, newshape)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
)
def test_forward_reshape():
_test_reshape(np.arange(6.0), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
_test_reshape_with_call()
_test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
_test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
_test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
_test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
_test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
""" One iteration of depth_to_space operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.depth_to_space(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
_test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
""" One iteration of space_to_depth operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.space_to_depth(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
_test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
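# Shape arithmetic for the two ops above: depth_to_space with block size b maps
# [N, H, W, C] to [N, H * b, W * b, C / (b * b)], so [1, 32, 32, 4] with b=2 becomes
# [1, 64, 64, 1]; space_to_depth is the inverse and maps [1, 32, 32, 4] with b=2 to
# [1, 16, 16, 16].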
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
""" One iteration of squeeze """
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
array_ops.squeeze(in_data, squeeze_dims)
else:
array_ops.squeeze(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
""" Squeeze """
# Nothing to squeeze.
_test_squeeze(np.arange(2).reshape((2)))
_test_squeeze(np.arange(6).reshape((2, 3)))
# Squeeze the middle element away.
_test_squeeze(np.arange(4).reshape((2, 1, 2)))
# Squeeze on both ends.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
# Positive squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
# Negative squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
def run(dtype_str, infer_shape, element_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(
dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False, None)
run(dtype, False, tf.TensorShape([None, 2]))
run(dtype, True, None)
def test_tensor_array_scatter():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
if infer_shape:
element_shape = tf.TensorShape([tf.Dimension(None)])
else:
element_shape = None
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(
dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.scatter(indices, t)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_gather():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
gather_indices = tf.constant([1, 2])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.gather(gather_indices)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_split():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
out3 = ta2.read(3)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_concat():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
t = ta2.concat()
out = tf.identity(t)
compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_size():
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.size()
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_stack():
def run(dtype_str, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
            pytest.skip("Needs fixing for tensorflow >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.stack()
print(t1)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_unstack():
def run(dtype_str, input_shape, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
            pytest.skip("Needs fixing for tensorflow >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
ta2 = ta1.unstack(t)
out0 = ta2.size()
out1 = ta2.read(0)
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, (5,), False)
run(dtype, (5, 5), True)
run(dtype, (5, 5, 5), False)
run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
""" One iteration of ConcatV2 """
with tf.Graph().as_default():
dtype = "float32"
in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
array_ops.concat_v2([in1, in2], dim)
np_data1 = np.random.uniform(size=shape1).astype(dtype)
np_data2 = np.random.uniform(size=shape2).astype(dtype)
compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
if tf.__version__ < LooseVersion("1.4.1"):
return
_test_concat_v2([2, 3], [2, 3], 0)
_test_concat_v2([10, 3, 5], [2, 3, 5], 0)
_test_concat_v2([2, 3], [2, 3], 1)
_test_concat_v2([5, 8], [5, 4], 1)
_test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
""" One iteration of sigmoid """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
sigmoid_out = math_ops.sigmoid(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
""" Sigmoid """
_test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
with tf.Graph().as_default():
inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
func(inp, name="argx0", **kwargs)
compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
for output_type in [tf.int64, tf.int32]:
for axis in [None, 0, 1, 2]:
data = np.random.uniform(size=(8, 4, 9)).astype("float32")
_test_argx(tf.argmax, data=data, axis=axis, output_type=output_type)
_test_argx(tf.argmin, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
""" One iteration of a variable """
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=None):
w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
math_ops.matmul(input_tensor, w)
compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
"""Variable type op test"""
_test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, ctx):
""" Read Variable op test """
tf.reset_default_graph()
data = np.random.uniform(size=(32, 100)).astype("float32")
input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
size = input_tensor.shape.dims[1]
var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
input_var = tf.Variable(var_data, name="var1", use_resource=True)
math_ops.matmul(input_tensor, input_var)
out_name = ["MatMul:0"]
out_node = ["MatMul"]
in_name = ["Placeholder:0"]
in_node = ["Placeholder"]
in_data = [data]
with tf.Session() as sess:
sess.run(variables.global_variables_initializer())
final_graph_def = sess.graph.as_graph_def(add_shapes=True)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
with pytest.raises(Exception) as execinfo:
mod, params = relay.frontend.from_tensorflow(
final_graph_def, layout=None, shape=shape_dict, outputs=None
)
assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
# Now convert the variables to constant and run inference on the converted graph
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=target,
out_names=out_name,
num_output=len(out_name),
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
""" One iteration of matmul """
A_shape_init = [i, j]
B_shape_init = [j, k]
for transpose_a in [False, True]:
for transpose_b in [False, True]:
outer = outer or []
A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init)
B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init)
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_matmul():
""" MatMul op test"""
_test_matmul(1, 3, 6, "int32")
_test_matmul(5, 3, 1, "float64")
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_batch_matmul():
""" TF op BatchMatMul, BatchMatMulV2 test"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32")
_test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True)
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
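# For the real-valued inputs used here, adjoint_a/adjoint_b are plain transposes of
# the last two axes, so a numpy reference for the adjoint cases is simply
# np.matmul(np.swapaxes(a, -2, -1), np.swapaxes(b, -2, -1)) (a sketch, not used by
# the tests).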
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
):
""" One iteration of a Stridedslice """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
name="strided_slice",
)
np_data = np.random.uniform(size=ip_shape).astype(dtype)
compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
"""test StridedSlice"""
_test_stridedslice((2), [1], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice((2, 1), [0], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice((2, 3, 4), [0], [1], [1], "float32", shrink_axis_mask=8)
_test_stridedslice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
_test_stridedslice((3, 4, 3), [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
_test_stridedslice((3, 4, 3), [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice((3, 4, 5, 3), [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice((3, 4, 5, 3), [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
_test_stridedslice((3, 4, 3), [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
_test_stridedslice(
(3, 4, 3), [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
)
_test_stridedslice(
(6, 4, 5), [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
)
_test_stridedslice(
(3, 4, 3), [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
)
_test_stridedslice(
(3, 4, 3), [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
(3, 4, 3), [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
(3, 4, 3), [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
)
_test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
_test_stridedslice(
(3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
)
_test_stridedslice(
(3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
)
_test_stridedslice(
(3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
)
_test_stridedslice(
(3, 4, 5, 4, 5, 6), [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
)
_test_stridedslice(
(3, 4, 5, 4, 5, 6),
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=5,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=8,
end_mask=8,
)
_test_stridedslice(
(3, 4, 5, 4, 5, 6),
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
(3, 4, 5, 4, 5, 6),
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=16,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
(3, 4, 5, 4, 5, 6),
[1, 2, 0, -3],
[4, 5, 3, 3],
[2, 2, 1, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=8,
)
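# The *_mask arguments above are bit fields indexed by slice position: begin_mask and
# end_mask ignore the corresponding begin/end value, new_axis_mask inserts a length-1
# axis, ellipsis_mask expands like "...", and shrink_axis_mask takes a single element
# and drops the axis.  For example, shrink_axis_mask=2 (bit 1 set) with begin [1, 0],
# end [4, 4] and stride [1, 1] is roughly the numpy expression x[1:4, 0].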
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name="RealDiv")
compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv")
compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
"""test FloorDiv, RealDiv"""
_test_forward_divide((4,), "int32")
_test_forward_divide((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "int32")
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name="FloorMod")
compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
"""test FloorMod"""
_test_forward_floormod((10,), (10,), "float32")
_test_forward_floormod((8, 2), (1,), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "int32")
#######################################################################
# TruncateMod
# -----------
def _test_forward_truncatemod(ip_shape, dtype):
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
"""test TruncateMod"""
_test_forward_truncatemod((4, 3, 7), "int32")
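# FloorMod and TruncateMod only differ for operands of mixed sign: floormod follows
# the sign of the divisor (tf.floormod(-7, 3) == 2, like Python's %), while
# truncatemod follows the sign of the dividend (tf.truncatemod(-7, 3) == -1, like
# C's %).  The tests above use positive data, where the two agree.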
#######################################################################
# Gather, GatherV2
# --------------------------
def _test_gather(ip_shape, indice_shape, indice_value, axis, dtype):
""" One iteration of a GatherV2 """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis)
np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
def _fill_indices(indice_value):
indices = np.array(ip_shape, dtype=dtype)
if isinstance(indice_value, int):
indices = np.array([indice_value], dtype="int32")
else:
indices = np.asarray(indice_value, dtype="int32")
return indices
np_indices = _fill_indices(indice_value)
compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
"""test Gather/GatherV2 layer"""
_test_gather((4,), (1,), 1, 0, "int32")
_test_gather((4,), (1,), 1, 0, "float32")
_test_gather((1, 4), (1,), [0], 0, "int32")
_test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, "int32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, "int32")
_test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, "float32")
#######################################################################
# GatherND
# --------------------------
def _test_gather_nd(ip_shape, indice_value, dtype):
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
"""test operator GatherNd"""
_test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
_test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
_test_gather_nd((4,), [1], "float32")
_test_gather_nd((4,), [1], "int32")
_test_gather_nd((1, 4), [0, 3], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
_test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
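# Worked GatherNd example for the first case above: with params [[1, 2], [3, 4]] and
# indices [[0, 0], [1, 1]], each index row selects one element, giving [1, 4]; a row
# shorter than the params rank selects a whole slice instead.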
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
"""test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")
check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """ One iteration of a Split """
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = (
len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
)
split = tf.split(in_data, num_or_size_splits, axis=axis)
relu = [tf.nn.relu(i) for i in split]
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
"""test split layer"""
# rank 1
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
# rank 2
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
# rank 3
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
# rank 4
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
# size_splits list
_test_split((6,), 0, [1, 2, 3], "int32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name="TopK")
compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
_test_forward_top_k_v2((3,), 1)
_test_forward_top_k_v2((3,), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
"""test unstack layer"""
_test_unstack((6,), 0, "int32")
_test_unstack((2, 6), 1, "float64")
# negative axis
_test_unstack((1, 4), -1, "int32")
_test_unstack((3, 6, 4), -2, "float32")
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0")
def test_forward_tile():
"""test Tile"""
_test_tile((2,), (3,), "int32")
_test_tile((2, 2), (2, 3), "float32")
_test_tile((2, 4, 6), (6, 7, 8), "float64")
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue")
np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
"""test ClipByValue op"""
if tf.__version__ < LooseVersion("1.9"):
_test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
_test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
out = tf.multiply(out1, out2, name="out")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
compare_tf_with_tvm(
[in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
)
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
in_data = [in_data] * 4
in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
out_name = ["out1:0", "out2:0"]
        out_node = [out.split(":")[0] for out in out_name]
        in_node = [inp.split(":")[0] for inp in in_name]
with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
tvm_output = run_tvm_graph(
final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
""" One iteration of resize bilinear """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
"""One iteration of resize bilinear with non-constant output shape, requires
value inference to get proper output shape."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype
)
to_shape = tf.shape(in_data)[1:3]
tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
""" One iteration of resize nearest neighbor """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
""" One iteration of resize nearest neighbor for graph with dynamic input shape """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=None, dtype=data.dtype)
# multiply input shape by scale factor
new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
""" Resize Bilinear, Nearest_Neighbor """
# TF default layout is NHWC
_test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
_test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
_test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
_test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
_test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
_test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
""" One iteration of broadcast_to"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
""" One iteration of broadcast_to with unknown shape at graph build"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=[None], dtype=data.dtype)
shape_data = tf.multiply(tf.shape(in_data), 32)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
""" Resize Bilinear """
_test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
    _test_broadcast_to_from_tensor((1,))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
""" Use the fill op to create a tensor of ones with non-constant shape."""
with tf.Graph().as_default():
tf.ones(shape=in_shape, dtype="float32")
compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape.
Some extra ops need to be added here to prevent the graph from
being fully constant and folded away."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype
)
x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
y = tf.math.add(in_data, tf.reduce_mean(x), name="out1")
compare_tf_with_tvm(data, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
with tf.Graph().as_default():
in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
in_value = tf.placeholder(shape=(), dtype=dtype)
out = tf.fill(in_shape, in_value)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode
)
def test_forward_fill():
""" Resize Bilinear """
    _test_fill((32,))
_test_fill((6, 32, 64, 64))
_test_fill_from_tensor((6, 32, 64, 64))
_test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
_test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
_test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
""" Crop to bounding box """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
""" Crop to bounding box """
_test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
img_shape,
boxes,
box_idx,
crop_size,
extrapolation_value=0.0,
method="bilinear",
dtype="float32",
):
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(
in_data,
boxes=boxes,
box_ind=box_idx,
crop_size=crop_size,
method=method,
extrapolation_value=extrapolation_value,
name="crop_and_resize",
)
compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
""" CropAndResize """
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
_test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
_test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
_test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
_test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
_test_forward_crop_and_resize(
img_shape=[10, 11, 11, 3],
boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
box_idx=[0, 1],
crop_size=[5, 5],
)
_test_forward_crop_and_resize(
img_shape=[20, 576, 576, 3],
boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
box_idx=[1, 0, 2, 3],
crop_size=[24, 24],
extrapolation_value=0.3,
)
_test_forward_crop_and_resize(
img_shape=[20, 229, 229, 3],
boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
box_idx=[3, 0, 2, 1],
crop_size=[58, 58],
extrapolation_value=0.2,
method="nearest",
)
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="debug",
)
def _test_forward_nms_v4(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
pad_to_max_output_size=True,
)
num_valid = tf.reshape(num_valid, shape=(-1,))
indices_padded = tf.reshape(indices_padded, shape=(-1,))
tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="debug",
)
def test_forward_nms():
""" NonMaxSuppressionV3,4 """
    for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v4]:
_test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
_test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
_test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
_test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
""" One iteration of a LSTM cell """
tf.reset_default_graph()
input_size = num_hidden
input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
def _get_tensorflow_output():
with tf.Session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)
):
m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
num_hidden, forget_bias=forget_bias
)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1],
{
x.name: np.array([[1.0, 1.0]]),
m0.name: in_state_c,
m1.name: in_state_h,
},
)
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
)
return final_graph_def, res
graph_def, tf_out = _get_tensorflow_output()
tvm_output = run_tvm_graph(
graph_def,
[input_data, in_state_c, in_state_h],
["root/input", "root/m0", "root/m1"],
num_output=7,
)
assert isinstance(tvm_output, list)
tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
"""test LSTM block cell"""
if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"):
# in 2.0, tf.contrib.rnn.LSTMBlockCell is removed
_test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ----
def _test_pack(axis, shape, **kwargs):
a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with tf.Graph().as_default():
tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)
assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
for axis in range(-3, 3):
_test_pack(axis, [3, 2, 1])
for axis in range(-1, 1):
_test_pack(axis, [3])
_test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
_test_forward_unpack((3,), 0, "int32")
_test_forward_unpack((3,), -1, "int16")
_test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
"""test operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range")
compare_tf_with_tvm([], [], "range:0")
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
def _test_pad(input_shape, paddings, mode, **kwargs):
""" One iteration of pad operation with given shape"""
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
pad_values = constant_op.constant(paddings)
pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
if mode == "CONSTANT":
if "constant_values" in kwargs:
out_name = "PadV2:0"
else:
out_name = "Pad:0"
else:
out_name = "MirrorPad:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def test_forward_pad():
""" Pad """
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
_test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_and(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_or(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_xor(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
out = tf.logical_not(in1, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm(in_data1, "in1:0", "out:0")
def test_forward_logical():
test_logical_and()
test_logical_or()
test_logical_xor()
test_logical_not()
#######################################################################
# Where, Select
# -------------
def test_forward_where():
""" Where: return elements depending on conditions"""
with tf.Graph().as_default():
with tf.Session() as sess:
input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
mask = input1 > input2
tf.where(mask, input1 + 1, input2 * 2)
in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
def test_forward_inception_v3():
"""test inception V3 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
"""test inception V1 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Build an image from random data.
from PIL import Image
from tvm.contrib import util
img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
img = Image.frombuffer("RGB", (600, 600), img_array.tostring(), "raw", "RGB", 0, 1)
temp = util.tempdir()
img_path = temp.relpath("tf-test.jpg")
img.save(img_path)
import os.path
if not tf.gfile.Exists(os.path.join(img_path)):
tf.logging.fatal("File does not exist %s", img_path)
data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
temp.remove()
# Extract tensorflow decoded image frame for tvm input
with tf.Session() as sess:
tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
"""test mobilenet model"""
# MobilenetV2
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"mobilenet_v2_1.4_224_frozen.pb",
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "MobilenetV2/Predictions/Reshape_1"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
"""test resnet model"""
if is_gpu_available():
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
out_node = "ArgMax"
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
for device in ["llvm", "cuda"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def, data, "input_tensor", len(tf_output), target=device
)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# SSD
# ---
def _test_ssd_impl():
"""Test SSD with backbone MobileNet V1"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"object_detection/ssd_mobilenet_v1_ppn_shared_"
"box_predictor_300x300_coco14_sync_2018_07_03.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
in_node = "image_tensor"
out_node = ["detection_boxes", "detection_scores", "detection_classes"]
with tf.Session() as sess:
tf_output = run_tf_graph(
sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
)
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
data,
in_node,
len(out_node),
target=device,
layout="NCHW",
out_names=out_node,
mode="vm",
disabled_pass=["FoldScaleAxis"],
serialize=True,
)
for i in range(len(out_node)):
tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
def test_forward_ssd():
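    # Converting the SSD graph can recurse deeply, so run the test in a worker thread
    # with an enlarged (100 MB) stack instead of relying on the default stack size.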
run_thread = threading.Thread(target=_test_ssd_impl, args=())
old_stack_size = threading.stack_size(100 * 1024 * 1024)
run_thread.start()
run_thread.join()
threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
"""test a simple pb with Placeholder node in the end of GraphDef"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("Custom/placeholder.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "mul"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except ImportError:
pass
def test_forward_ptb():
"""test ptb model"""
config = tf_testing.get_config()
num_steps = config.num_steps
num_hidden = config.hidden_size
num_layers = config.num_layers
batch_size = config.batch_size
vocab_size = config.vocab_size
out_sample_shape = (batch_size, vocab_size)
out_state_shape = (batch_size, num_hidden)
# Sample input
inpt = "we have no useful information on"
cnt_sample = 20
def _pretty_print(items, is_char_model, id2word):
if not is_char_model:
return " ".join([id2word[x] for x in items])
else:
return "".join([id2word[x] for x in items]).replace("_", " ")
def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c' and 'h' consist of all layers' values
shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=shape_dict,
outputs=[
"Model/Softmax:0",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
)
target = "llvm"
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_runtime
ctx = tvm.cpu(0)
return params, graph_runtime.create(graph, lib, ctx)
def _do_tvm_sample(model, data, in_states, params, num_samples):
"""Sampled from the model"""
samples = []
state = in_states
sample = None
def _get_sample(data, state):
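            # Feed the word id plus the four recurrent state tensors (c and h for each of the two LSTM layers).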
input_data = np.full((batch_size, num_steps), data, dtype="int32")
model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
tvm.nd.array(state[0].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
tvm.nd.array(state[1].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
tvm.nd.array(state[2].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
tvm.nd.array(state[3].astype("float32")),
)
model.set_input(**params)
model.run()
tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy()
state_output = []
for i in range(4):
state_output.append(
model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy()
)
sample = tf_testing.pick_from_weight(tvm_output[0])
return sample, state_output
for x in data:
sample, state = _get_sample(x, state)
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
sample, state = _get_sample(samples[-1], state)
samples.append(sample)
k += 1
return samples, state
with tf.Graph().as_default():
word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
vocab_size = len(word_to_id)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
sess = tf.Session()
# TVM graph module creation
params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statements of 20 words
cnt_stm = 0
while cnt_stm < 10:
cnt_stm += 1
in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
seed_for_sample = inpt.split()
tvm_samples, tvm_state = _do_tvm_sample(
m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
)
tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
tf_samples, tf_state = tf_testing.do_tf_sample(
sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
)
tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
inpt = tvm_sample_str
tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
""" testing local response normalization """
lrn_depth_radius = size / 2
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
nn_ops.local_response_normalization(
in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
)
compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
_test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
""" testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
_test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
if axes is None:
tf.transpose(in1)
else:
tf.transpose(in1, perm=axes)
compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_transpose_axes_input(ishape, axes):
data = np.random.uniform(size=ishape).astype(np.float32)
axes_np = np.array(axes).astype(np.int32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
const1 = tf.constant(axes_np, dtype=tf.int32)
# make axes an input to tf.transpose, but not an input to the graph,
# so it can be extracted with infer_value_simulated
axes = tf.reverse(const1, axis=[-1])
tf.transpose(in1, axes)
compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_transpose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
input_data = np.array(input_value, dtype=np.float32)
with tf.Graph().as_default():
input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input")
tf.slice(input_tensor, begin_value, size_value, name="slice_output")
compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0")
def test_forward_slice():
_test_forward_slice_operation_input([1, 1], [0], [2])
_test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
_test_forward_slice_operation_input(
[[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
)
def test_forward_ceil():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.ceil(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0")
def test_forward_floor():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.floor(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0")
def test_forward_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_runtime", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.relu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_runtime", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.elu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0")
def test_forward_selu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.selu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0")
def test_forward_tanh():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.tanh(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
"""test operator Softmax """
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
check_softmax((2, 3, 5), 2, "float32")
check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ["in_data:0"], "round:0")
def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
    if LooseVersion(tf.__version__) < LooseVersion("1.2"):
_test_forward_zeros_like((2, 3), "int32")
_test_forward_zeros_like((2, 3, 5), "int8")
_test_forward_zeros_like((2, 3, 5, 7), "uint16")
_test_forward_zeros_like((2, 3, 11), "float32")
_test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
ishape = (1, 3, 10, 14)
inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1")
in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2")
out = tf.math.squared_difference(in1, in2)
compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
"""test ReverseV2"""
_test_forward_reverse_v2((2, 3), 0, "int32")
_test_forward_reverse_v2((2, 3, 5), 2, "float32")
_test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
_test_forward_reverse_v2((2, 3, 5), -1, "float64")
_test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0")
def test_forward_square():
"""test operator Square """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
"""test Pow and Exp """
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
out = tf.exp(in1, name="exp")
compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
"""test unary operators"""
np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
out = op(in_data)
compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
_test_forward_unary(tf.acos, -1, 1)
_test_forward_unary(tf.asin, -1, 1)
_test_forward_unary(tf.atanh, -1, 1)
_test_forward_unary(tf.sinh)
_test_forward_unary(tf.cosh)
_test_forward_unary(tf.acosh)
_test_forward_unary(tf.asinh)
_test_forward_unary(tf.atan)
_test_forward_unary(tf.sin)
_test_forward_unary(tf.cos)
_test_forward_unary(tf.tan)
_test_forward_unary(tf.tanh)
_test_forward_unary(tf.erf)
_test_forward_unary(tf.log)
_test_forward_unary(tf.log1p)
def test_forward_atan2():
"""test operator tan """
tf.disable_eager_execution()
np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
tf.atan2(in_data_1, in_data_2, name="atan2")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_negative():
"""test tf operator Neg """
np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
"""test Rsqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
"""test Sqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
"""test operator RightShift"""
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
_test_forward_right_shift((7,), "int32")
_test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
"""test operator LeftShift"""
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
_test_forward_left_shift((10,), "int32")
_test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
def check_mean(ishape, **kwargs):
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.keras.backend.mean(in1, **kwargs)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
check_mean((10, 8, 16, 32))
check_mean((10, 8, 16, 32), axis=(2, 3))
check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
def check_size(ishape):
np_input = np.random.uniform(size=ishape).astype(np.float32)
        # If every dimension is constant, TF folds the Size operator into a constant,
        # so make the batch dimension dynamic to keep the op in the graph.
tf_input_shape = list(np_input.shape)
tf_input_shape[0] = None
with tf.Graph().as_default():
input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
tf.size(input, name="size")
compare_tf_with_tvm([np_input], ["input:0"], "size:0")
check_size((10, 8, 16, 32))
check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_math_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_test_math_op(tf.math.reduce_all, dtypes=["bool"])
_test_math_op(tf.math.reduce_any, dtypes=["bool"])
_test_math_op(tf.math.reduce_max)
_test_math_op(tf.math.reduce_min)
_test_math_op(tf.math.reduce_prod)
_test_math_op(tf.math.reduce_variance, dtypes=["float32"])
_test_math_op(tf.math.reduce_std, dtypes=["float32"])
_test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
_test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
op = func(in1, in2, name="op")
out = tf.cast(op, tf.int32, name="out1")
compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
_test_forward_rel_op([t1, t2], math_ops.less)
_test_forward_rel_op([t1, t2], math_ops.greater)
_test_forward_rel_op([t1, t2], math_ops.less_equal)
_test_forward_rel_op([t1, t2], math_ops.greater_equal)
_test_forward_rel_op([t1, t2], math_ops.equal)
_test_forward_rel_op([t1, t2], math_ops.not_equal)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
def test_forward_expand_dims():
_test_forward_expand_dims(np.int32(1), 0)
_test_forward_expand_dims(np.array([1]), 0)
_test_forward_expand_dims(np.array([1]), -1)
_test_forward_expand_dims(np.array([[1], [2]]), 0)
_test_forward_expand_dims(np.array([[1], [2]]), 1)
_test_forward_expand_dims(np.array([[1], [2]]), -1)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
"""test Op Maximum"""
def check_maximum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0")
check_maximum((10, 8, 16, 32), (1,), dtype="int32")
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
"""test Op Minimum"""
def check_minimum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0")
check_minimum((10, 8, 16, 32), (1,), dtype="int32")
check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
with tf.Graph().as_default():
in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
var1 = tf.Variable(in_data1, name="in1")
var2 = array_ops.placeholder_with_default(var1, None, name="place1")
in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
out1 = tf.math.add(var1, var2, name="out1")
out2 = tf.math.add(out1, place1, name="out2")
compare_tf_with_tvm(
[in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
)
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
_test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
_test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.add_n(temp)
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def test_forward_add_n():
x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
in0 = x
in1 = [x, y]
in2 = (x, y, z)
in3 = m
in4 = [m, n]
in5 = (m, n, o)
_test_forward_add_n(in0)
_test_forward_add_n(in1)
_test_forward_add_n(in2)
_test_forward_add_n(in3)
_test_forward_add_n(in4)
_test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
"""Test the sharing params case."""
np_data = np.random.uniform(size=(2, 2, 2)).astype("float32")
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
axis = tf.constant([-1], dtype=tf.int32, name="axis")
mean0 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0")
mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1")
out = tf.add(mean0, mean1, name="out")
compare_tf_with_tvm([np_data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.unravel_index(temp[0], temp[1])
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
tf.reset_default_graph()
with tf.Graph().as_default():
indices_1 = constant_op.constant(x, dtype=dtype)
dims_1 = constant_op.constant(y, dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
x = np.array([0, 1, 2, 3])
y = np.array([2, 2])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([2, 3])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([6])
_test_forward_unravel_index([x, y])
x = np.array([102, 300, 16])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
x = np.array([100])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
# Test scalar input
_test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
""" One iteration of dilation2d with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Dilation2D:0",
no_gpu=True,
)
def test_forward_dilation():
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
"""test operator infinity ops"""
# Only float types are allowed in Tensorflow for isfinite and isinf
# float16 is failing on cuda
tf_dtypes = ["float32", "float64"]
for tf_dtype in tf_dtypes:
shape = (8, 8)
data = np.random.uniform(size=shape).astype(tf_dtype)
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
tf.reset_default_graph()
in_data = tf.placeholder(tf_dtype, shape, name="in_data")
tf_op(in_data, name=name)
compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
_verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
_verify_infiniteness_ops(tf.is_finite, "isfinite")
def _test_spop_placeholder_without_shape_info():
with tf.Graph().as_default():
@function.Defun(*[tf.int32] * 2)
def Forward(x, y):
print(x.name)
print(y.name)
b = tf.add(x, y)
return b
pl1 = tf.placeholder(tf.int32, name="pl1")
pl2 = tf.placeholder(tf.int32, name="pl2")
pl3 = tf.placeholder(tf.int32, name="pl3")
data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
z2 = z1 + pl3
compare_tf_with_tvm(
[data, data2, data3],
["pl1:0", "pl2:0", "pl3:0"],
["StatefulPartitionedCall:0", z2.name],
mode="vm",
init_global_variables=True,
)
def _test_spop_placeholder_with_shape_and_default_value():
with tf.Graph().as_default():
data = np.ones([1], dtype=int).astype(np.int32)
dataVar = tf.Variable(data, shape=data.shape)
pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
@function.Defun(*[tf.int32])
def pl_with_default(pl):
return tf.expand_dims(tf.multiply(pl, pl), 0)
z = gen_functional_ops.StatefulPartitionedCall(
args=[tpl], Tout=[tf.int32], f=pl_with_default
)
compare_tf_with_tvm(
data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_arange_feed():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_array_feed():
with tf.Graph().as_default():
t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
t1 = tf.placeholder(tf.int32, name="t1")
t2 = tf.placeholder(tf.int32, name="t2")
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_basic():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, name="t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def myfunc(x, y):
return tf.add(x, y, "myfunc")
@tf.function
def myfunc2(x, y):
z = myfunc(x, y)
l = myfunc(z, y)
m = myfunc(l, z)
return tf.add(l, m, "myfunc2")
res1 = myfunc(t1, t2)
res2 = myfunc2(res1, t1)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_no_autograph():
with tf.Graph().as_default():
@tf.function(autograph=False)
def fun1(a):
return tf.multiply(a, a)
@tf.function(autograph=False)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)],
Tout=[dtypes.float32],
f=fun3,
name="SpopFnInvocation",
)
compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 3)
def arithmetic(m, x, c):
z = tf.add(tf.multiply(m, x), c)
return z
m = tf.constant(10)
x = tf.constant(20)
c = tf.constant(2)
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[m, x, c], Tout=[tf.int32], f=arithmetic
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_control_flow():
with tf.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Body1(x, y):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
z = math_ops.multiply(x, y)
i = 0
while i < 10:
i += 1
if i == 5:
continue
z = math_ops.multiply(x, y * i)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[constant_op.constant(32.0), constant_op.constant(100.0)],
Tout=[dtypes.float32],
f=Body1,
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_variables():
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32)
var2 = tf.Variable(const2, dtype=tf.int32)
@function.Defun(tf.int32, tf.int32)
def Forward(x, y):
return tf.multiply(x, y)
z = gen_functional_ops.StatefulPartitionedCall(
args=[var1, var2], Tout=[tf.int32], f=Forward
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
)
def _test_spop_constants():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 2)
def constantsFn(x, y):
vv = tf.constant([2, 3, 4], name="vv")
z = tf.add(vv + x, y)
return z
a = tf.constant(20000, name="a")
b = tf.constant(40000, name="b")
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[a, b], Tout=[tf.int32], f=constantsFn
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_stateful():
    # This test case is to test that TVM rejects any TF stateful operations
    # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
    # (as these two operators can still be used as container graphs to execute
    # "stateless" operations internally).
tf.reset_default_graph()
with tf.Graph().as_default():
@tf.function
def FunctionWithStatefulOp_One(i):
b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
y = tf.multiply(b, i)
return y
@tf.function
def FunctionWithStatefulOp(m, n):
a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
x = tf.multiply(a, m)
y = FunctionWithStatefulOp_One(n)
z = tf.multiply(x, y)
return z
op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
# This test case is to test that TVM rejects inconsistent device assignment
# while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
# be used as container graphs to internally execute "stateless" operations.
tf.reset_default_graph()
with tf.Graph().as_default():
def fun1(a):
with ops.device("/GPU:0"):
return tf.multiply(a, a)
def fun2(b):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
with ops.device("/CPU:0"):
x = fun2(x)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
y = fun1(y)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
# This test case is to test that TVM rejects any graph containing
# resource variables with StatefulPartitionedOp.
tf.reset_default_graph()
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
@tf.function
def resourceVariablesTest(x, y):
return tf.multiply(x, y)
op = resourceVariablesTest(var1, var2)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
_test_spop_stateful()
_test_spop_device_assignment()
_test_spop_resource_variables()
# Placeholder test cases
_test_spop_placeholder_without_shape_info()
_test_spop_placeholder_with_shape_and_default_value()
_test_spop_placeholder_numpy_arange_feed()
_test_spop_placeholder_numpy_array_feed()
# Function Invocation test cases
_test_spop_function_invocation_basic()
_test_spop_function_invocation_nested()
_test_spop_function_invocation_no_autograph()
_test_spop_function_invocation_defun()
# Test cases for various other TF constructs
_test_spop_arithmetic()
_test_spop_control_flow()
_test_spop_variables()
_test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
tf.reset_default_graph()
with tf.Graph().as_default():
data = tf.placeholder(tf.float32, name="data", shape=(None,))
out = data + 1
np_data = np.random.uniform(size=(2,)).astype("float32")
out_name = "add"
with tf.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
np_data,
["data"],
1,
target=device,
layout="NCHW",
out_names=[out_name],
mode="vm",
ignore_in_shape=True,
)
tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynamic_rnn_lstmblockcell():
if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
return
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
echo_step = 3
batch_size = 5
num_layers = 5
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
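    # init_state carries (c, h) for every layer; unpack it into per-layer LSTMStateTuples for MultiRNNCell.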
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple(
[
tf.nn.rnn_cell.LSTMStateTuple(
state_per_layer_list[idx][0], state_per_layer_list[idx][1]
)
for idx in range(num_layers)
]
)
# Forward passes
def lstm_cell():
return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell() for _ in range(num_layers)], state_is_tuple=True
)
states_series, current_state = tf.nn.dynamic_rnn(
cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x, y = generateData()
_current_state = np.zeros((num_layers, 2, batch_size, state_size))
start_idx = 0
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
# Save current state for TVM
current_state_tvm = _current_state
_current_state, _states_series = sess.run(
[current_state, states_series],
feed_dict={batchX_placeholder: batchX, init_state: _current_state},
)
# Organize results and corresponding names
tf_output = [_states_series]
for c in _current_state:
tf_output.append(c.c)
tf_output.append(c.h)
name = [states_series.name.split(":")[0]]
for t in current_state:
name.append(t.c.name.split(":")[0])
name.append(t.h.name.split(":")[0])
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
tvm_output = run_tvm_graph(
final_graph_def,
[batchX.astype("float32"), current_state_tvm.astype("float32")],
["Placeholder", "Placeholder_1"],
out_names=name,
num_output=len(name),
mode="vm",
disabled_pass=["FoldScaleAxis"],
)
# Compare result
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
pytest.main([__file__])
|
display_server.py
|
#######################################################
# Display Server - Handle multiple requests by queuing
#######################################################
import queue
import threading
class DisplayServer:
def __init__(self, config):
try:
if config.model == 'SSD1306':
# v1 Hardware
from lib.display.ssd1306 import SSD1306_Display
self.display = SSD1306_Display(config.i2c_address, config.display_height,
config.font, config.status_size, config.preset_size)
elif config.model == 'HD44780':
# v2 Hardware
from lib.display.hd44780 import HD44780_Display
self.display = HD44780_Display(
config.i2c_address, config.port_expander, config.display_height, config.display_width)
except:
# Default to v1 OLED display
from lib.display.ssd1306 import SSD1306_Display
self.display = SSD1306_Display(config.i2c_address, config.display_height,
config.font, config.status_size, config.preset_size)
# Setup queue processor thread
self.process_queue = queue.Queue()
threading.Thread(target=self.queue_processor, daemon=True).start()
def clear_screen(self):
request = DisplayRequest('clear_screen', None)
self.process_queue.put_nowait(request)
def display_status(self, status):
request = DisplayRequest('display_status', (status,))
self.process_queue.put_nowait(request)
def show_selected_preset(self, preset, name=None, bpm=None):
request = DisplayRequest('show_selected_preset', (preset, name, bpm))
self.process_queue.put_nowait(request)
def show_unselected_preset(self, preset, name=None, bpm=None):
request = DisplayRequest('show_unselected_preset', (preset, name, bpm))
self.process_queue.put_nowait(request)
def tap_mode(self, tempo):
request = DisplayRequest('tap_mode', (tempo,))
self.process_queue.put_nowait(request)
def queue_processor(self):
while True:
request_item = self.process_queue.get(True)
if request_item.type == 'clear_screen':
self.display.clear_screen()
elif request_item.type == 'display_status':
self.display.display_status(request_item.params[0])
elif request_item.type == 'show_selected_preset':
self.display.show_selected_preset(
request_item.params[0], request_item.params[1], request_item.params[2])
elif request_item.type == 'show_unselected_preset':
self.display.show_unselected_preset(
request_item.params[0], request_item.params[1], request_item.params[2])
elif request_item.type == 'tap_mode':
self.display.tap_mode(request_item.params[0])
elif request_item.type == 'update_bpm':
self.display.update_bpm(request_item.params[0])
def update_bpm(self, bpm):
request = DisplayRequest('update_bpm', (bpm,))
self.process_queue.put_nowait(request)
class DisplayRequest:
def __init__(self, type, params):
self.type = type
self.params = params
|
test_pool.py
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises, is_not_, is_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
import random
from sqlalchemy.testing.mock import Mock, call, patch
import weakref
join_timeout = 10
def MockDBAPI():
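    # Stand-in DBAPI "module": connect() returns Mock connections whose
    # cursor() yields fresh Mocks, and shutdown(True) swaps connect() for a
    # version that raises, which the reconnect/cleanup tests below rely on.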
def cursor():
return Mock()
def connect(*arg, **kw):
return Mock(cursor=Mock(side_effect=cursor))
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect),
shutdown=shutdown,
is_shutdown=False)
return db
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
dbapi = MockDBAPI()
manager = pool.manage(dbapi, use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(dbapi.connect.mock_calls,
[
call("foo.db"),
call("foo.db"),
]
)
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_threadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def test_threadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert 'foo2' in c.info
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'invalidate', canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'soft_invalidate', canary)
return p, canary
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, 'first_connect')
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, 'connect')
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(.02)
c1.close()
time.sleep(.02)
threads = []
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
eq_(evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect()]
)
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
return pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError:
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect())
c2 = self._with_teardown(p.connect())
c3 = self._with_teardown(p.connect())
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(
target=run_test, args=("success_one", p, False)),
threading.Thread(
target=run_test, args=("success_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_one", p, True)),
threading.Thread(
target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False))
]
for t in threads:
t.start()
time.sleep(.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[call("success_one"), call("success_two"),
call("overflow_two"), call("overflow_three"),
call("overflow_one")]
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
mutex.acquire()
try:
return dbapi.connect()
finally:
mutex.release()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(.1)
conn.close()
c1 = p.connect()
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(target=waiter,
args=(p, timeout, max_overflow))
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called _ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator,
pool_size=1, timeout=None,
max_overflow=0)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.start()
threads.append(t)
time.sleep(.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=dbapi.connect,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
pool._refs.clear()
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0])
)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
        ConnectionFairy with an ambiguous counter, i.e. it's not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(.5)
c3 = p.connect()
assert id(c3.connection) != c_id
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2_rec = c2._connection_record
c2.invalidate(soft=True)
assert c2_rec.connection is c2.connection
c2.close()
time.sleep(.5)
c3 = p.connect()
assert id(c3.connection) != c_id
assert c3._connection_record is c2_rec
assert c2_rec.connection is c3.connection
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record,
pool, ref, echo, fairy=None):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback")
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy)
return patch.object(
pool, '_finalize_fairy', assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(
Exception,
p.connect
)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect())
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect())
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2, recycle=1)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
        # return 7 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(Error(), "statement", {},
Mock(), Mock())
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
        # connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn, ))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1,
max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect()
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
@testing.requires.threading_with_mock
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls([
call('foo.db'),
call('foo.db')],
any_order=True)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
face_detector_tutorial_node.py
|
#!/usr/bin/env python
import rospy
import numpy as np
import math
from duckietown_msgs.msg import Twist2DStamped
from sensor_msgs.msg import CompressedImage, Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import sys
import time
import threading
class face_detector_wama(object):
def __init__(self):
self.node_name = rospy.get_name()
self.thread_lock = threading.Lock()
self.active = True
        # to do: initialize face_detected to the no-faces-detected scenario
self.face_detected = XXX
self.bridge = CvBridge()
        # Publication
# To do : publish ros message topic: /node_name/car_cmd, datatype: Twist2DStamped
self.pub_car_cmd = rospy.Publisher("XXX",XXX,queue_size=1)
# To do : publish ros message topic: /node_name/image_with_face, datatype: Image
self.pub_image_face = rospy.Publisher("XXX", XXX, queue_size=1)
# Subscription
        # To do : subscribe ros message topic: /node_name/joystick_car_cmd, datatype: Twist2DStamped, callback function: self.cbJoystick
self.sub_joystick_car_cmd = rospy.Subscriber("XXX", XXX, XXX,queue_size=1)
# To do : subscribe ros message topic: /node_name/image, datatype: CompressedImage, callback function: self.cbImage
self.sub_image_origin = rospy.Subscriber("XXX", XXX, XXX, queue_size=1)
# safe shutdown
rospy.on_shutdown(self.custom_shutdown)
# timer
rospy.loginfo("[%s] Initialized " %(rospy.get_name()))
def custom_shutdown(self):
rospy.loginfo("[%s] Shutting down..." %self.node_name)
# Send stop command
car_control_msg = Twist2DStamped()
car_control_msg.v = 0.0
car_control_msg.omega = 0.0
self.publishCmd(car_control_msg)
        rospy.sleep(0.5)  # To make sure that it gets published.
rospy.loginfo("[%s] Shutdown" %self.node_name)
def cbImage(self, image_msg):
if not self.active:
return
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
return
try:
self.cbFacedetect(image_msg)
finally:
self.thread_lock.release()
def publishCmd(self,car_cmd_msg):
        # to do: use the pub_car_cmd publisher we initialized at line 24 to publish the car command message
self.XXX.publish(XXX)
def cbJoystick(self,car_cmd_msg):
        # to do: if the face_detected scenario is no-faces-detected, keep the joystick command as the car control command
if self.face_detected == XXX:
            # to do: initialize a car command message to publish, datatype: Twist2DStamped
car_control_msg = XXX
            # to do: use the joystick command as the car command
car_control_msg.v = XXX.v
car_control_msg.omega = XXX.omega
# to do: publish car control command
self.publishCmd(XXX)
def cbFacedetect(self, image_msg):
# Decompress image and convert ROS image message to cv image
narr = np.fromstring(image_msg.data, np.uint8)
image = cv2.imdecode(narr, cv2.CV_LOAD_IMAGE_COLOR)
        # Initialize the OpenCV CascadeClassifier and load the frontal-face detection model
faceCascade = cv2.CascadeClassifier('/home/ubuntu/duckietown/catkin_ws/src/spring2016_nctu/wama/face_detector/src/haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(gray,scaleFactor=2,minNeighbors=5,minSize=(10, 10),flags = cv2.cv.CV_HAAR_SCALE_IMAGE)
print "Found {0} faces!".format(len(faces))
# Draw face detections region proposals in the image
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Convert cv image to ROS image message
image_msg_out = self.bridge.cv2_to_imgmsg(image, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
        # to do: use the pub_image_face publisher we initialized at line 27 to publish the image with face region proposals
self.XXX.publish(XXX)
        # to do: initialize a car command message to publish, datatype: Twist2DStamped
car_control_msg = XXX
        # to do: if faces are detected, use the stop command as the car control command
if len(faces) != XXX:
            # to do: set faces-detected as the face_detected scenario
self.face_detected = XXX
# to do: use stop command as car command
car_control_msg.v = XXX
car_control_msg.omega = XXX
# to do: publish car control command
self.publishCmd(XXX)
        # to do: if no faces are detected, set no-faces-detected as the face_detected scenario
if len(faces) == XXX:
            # to do: set no-faces-detected as the face_detected scenario
self.face_detected = XXX
#car_control_msg.v=0
#car_control_msg.omega=0
#self.publishCmd(car_control_msg)
if __name__ == "__main__":
    # to do: initialize a node named "face_detector_X", where X = your duckiebot name
rospy.init_node("XXX",anonymous=False)
face_detector_wama_node = face_detector_wama()
rospy.spin()
|
__init__.py
|
from .. import db
from sqlalchemy.exc import DatabaseError
from threading import Thread
def add_to_database(obj):
try:
db.session.add(obj)
db.session.commit()
return True
except DatabaseError:
db.session.rollback()
return False
def wait_in_other_thread(func, callback):
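    # Run `func` on a worker thread and pass its return value to `callback`
    # when it finishes, without blocking the caller.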
def target():
rv = func()
callback(rv)
    t = Thread(target=target)
    # start() runs `target` on a new thread; run() would execute it
    # synchronously in the caller's thread, defeating the purpose.
    t.start()
|
bot.py
|
#!/usr/bin/env python
# (c) 2011 Anton Romanov
#
#
"""
"""
import os, imp, sys, threading, inspect
import re, struct
import socket, asyncore, asynchat
from hon import masterserver,packets
from struct import unpack
from hon.honutils import normalize_nick
import time
from utils.dep import dep
home = os.getcwd()
class Store:pass
class Bot( asynchat.async_chat ):
store = Store()
#thread-safety, kinda
def initiate_send(self):
self.sending.acquire()
asynchat.async_chat.initiate_send(self)
self.sending.release()
def err(self, msg):
caller = inspect.stack()
print("Error: {0} ({1}:{2} - {3})".format(msg, caller[1][1], caller[1][2], time.strftime('%X')))
def __init__( self,config ):
asynchat.async_chat.__init__( self )
self.verbose = True
self.config = config
self.nick = self.config.nick
self.buffer = ''
self.doc = {}
self.id2nick = {}
self.nick2id = {}
self.chan2id = {}
self.id2chan = {}
self.id2clan = {}
self.nick2clan = {}
self.setup()
self.sending = threading.Lock()
self.cooldowns = {}
self.channel_cooldowns = {}
self.clan_status = {}
self.user_status = {}
#self.writelock = threading.Lock()
#self.sleep = time.time() - 10
#self.send_threshold = 1
self.ac_in_buffer_size = 2
#self.ac_out_buffer_size = 2
self.connection_timeout_threshold = 60
self.connection_timeout = time.time() + 5
def readable(self):
if time.time() - self.connection_timeout >= self.connection_timeout_threshold:
self.close()
return False
return True
def write_packet(self,packet_id,*args):
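        # Frame an outgoing packet: the payload built by packets.pack() is
        # preceded by its byte length as a little-endian uint16.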
data = packets.pack(packet_id,*args)
self.write(struct.pack('<H',len(data)))
self.write(data)
def write( self, data ):
#self.writelock.acquire()
#to_sleep = time.time() - self.sleep - self.send_threshold
#if to_sleep < 0:
#time.sleep(-to_sleep)
self.push(data)
#self.sleep = time.time()
#self.writelock.release()
#def handle_close( self ):
#print 'disconnected'
def handle_connect( self ):
print ('socket connected')
self.set_terminator(2)
self.got_len = False
self.write_packet(packets.ID.HON_CS_AUTH_INFO,self.account_id,
self.cookie,self.ip,self.auth_hash,packets.ID.HON_PROTOCOL_VERSION,0x383,0,5,4,'lac',0)
def collect_incoming_data( self, data ):
self.buffer += data
def found_terminator( self ):
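        # asynchat framing: alternate between reading the 2-byte little-endian
        # length prefix and reading that many payload bytes, toggling
        # self.got_len on every terminator hit.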
if self.got_len:
self.set_terminator(2)
self.dispatch(self.buffer)
else:
self.set_terminator(unpack("<H",self.buffer)[0])
self.buffer = ''
self.got_len = not self.got_len
def masterserver_request(self,query, path = None,decode = True, cookie = False):
if cookie:
query['cookie'] = self.cookie
response = masterserver.request(query,path = path,decode = decode)
if response and 'cookie' in response and response[0] == False:
print('cookie expired, renewing')
self.auth()
#return self.masterserver_request(query,path,decode,cookie)
return response
def honapi_request(self, query):
if not hasattr(self.config, 'api_key') or len(self.config.api_key) == 0:
print("HoNAPI key not set")
return None
response = masterserver.api_request(self.config.api_key, query)
return response
def auth(self):
auth_data = masterserver.auth(self.config.nick,self.config.password)
if 'ip' not in auth_data or 'auth_hash' not in auth_data:
print("Login Failure")
return False
self.ip = auth_data['ip']
self.cookie = auth_data['cookie']
self.account_id = int(auth_data['account_id'])
self.auth_hash = auth_data['auth_hash']
self.got_len = False
self.nick = auth_data['nickname']
self.id2nick[self.account_id] = self.nick
self.nick2id[self.nick] = self.account_id
if "clan_member_info" in auth_data:
self.clan_info = auth_data["clan_member_info"]
else:
self.clan_info = {}
if "clan_roster" in auth_data and "error" not in auth_data["clan_roster"]:
self.clan_roster = auth_data["clan_roster"]
else:
self.clan_roster = {}
if "buddy_list" in auth_data:
buddy_list = auth_data["buddy_list"]
else:
buddy_list = {}
self.buddy_list = {}
for id in self.clan_roster:
if self.clan_roster[id]['nickname']:
nick = normalize_nick(self.clan_roster[id]['nickname']).lower()
self.id2nick[id] = nick
self.nick2id[nick] = id
for buddies in buddy_list.values():
for buddy in buddies.values():
try:
id = int(buddy['buddy_id'])
self.buddy_list[id] = buddy
nick = normalize_nick(buddy['nickname'])
self.id2nick[id] = nick
self.nick2id[nick] = id
except:pass
return auth_data
def run(self):
auth_data = self.auth()
if auth_data is False:
return
self.create_socket( socket.AF_INET, socket.SOCK_STREAM )
self.connect( ( auth_data['chat_url'], int(auth_data['chat_port']) ) )
asyncore.loop()
def setup(self):
masterserver.set_region(self.config.region)
self.variables = {}
filenames = []
if not hasattr(self.config, 'enable'):
for fn in os.listdir(os.path.join(home, 'modules')):
if fn.endswith('.py') and not fn.startswith('_'):
filenames.append(os.path.join(home, 'modules', fn))
else:
for fn in self.config.enable:
filenames.append(os.path.join(home, 'modules', fn + '.py'))
if hasattr(self.config, 'extra'):
for fn in self.config.extra:
if os.path.isfile(fn):
filenames.append(fn)
elif os.path.isdir(fn):
for n in os.listdir(fn):
if n.endswith('.py') and not n.startswith('_'):
filenames.append(os.path.join(fn, n))
modules = []
excluded_modules = getattr(self.config, 'exclude', [])
deps = {}
imp_modules = {}
for filename in filenames:
name = os.path.basename(filename)[:-3]
if name in excluded_modules: continue
# if name in sys.modules:
# del sys.modules[name]
try: module = imp.load_source(name, filename)
except Exception, e:
print >> sys.stderr, "Error loading %s: %s (in bot.py)" % (name, e)
else:
if hasattr(module, 'depend'):
deps[name] = module.depend
else:
deps[name] = []
#make every module depend on config
if 'config' not in deps[name] and name != 'config':
deps[name].append('config')
imp_modules[name] = module
deps = dep(deps)
for s in deps:
for name in s:
module = imp_modules[name]
if hasattr(module, 'setup'):
module.setup(self)
self.register(vars(module))
modules.append(name)
if modules:
print 'Registered modules:', ', '.join(modules)
else: print >> sys.stderr, "Warning: Couldn't find any modules"
self.bind_commands()
def error(self, origin):
try:
import traceback
trace = traceback.format_exc()
print trace
lines = list(reversed(trace.splitlines()))
report = [lines[0].strip()]
for line in lines:
line = line.strip()
if line.startswith('File "/'):
report.append(line[0].lower() + line[1:])
break
else: report.append('source unknown')
print(report[0] + ' (' + report[1] + ')')
except: print("Got an error.")
def register(self, variables):
# This is used by reload.py, hence it being methodised
for name, obj in variables.iteritems():
if hasattr(obj, 'commands') or hasattr(obj, 'rule') or hasattr(obj,'event'):
self.variables[name] = obj
def bind_commands(self):
self.commands = {'high': {}, 'medium': {}, 'low': {}}
def bind(self, priority, regexp, func):
#print priority, regexp.pattern.encode('utf-8'), func
# register documentation
if not hasattr(func, 'name'):
func.name = func.__name__
if func.__doc__:
if hasattr(func, 'example'):
example = func.example
example = example.replace('$nickname', self.nick)
else: example = None
self.doc[func.name] = (func.__doc__, example)
self.commands[priority].setdefault(regexp, []).append(func)
def sub(pattern, self=self):
# These replacements have significant order
pattern = pattern.replace('$nickname', re.escape(self.nick))
return pattern.replace('$nick', r'%s[,:] +' % re.escape(self.nick))
for name, func in self.variables.iteritems():
#print name, func
if not hasattr(func, 'priority'):
func.priority = 'medium'
if not hasattr(func, 'thread'):
func.thread = True
if not hasattr(func, 'event'):
func.event = [packets.ID.HON_SC_WHISPER,packets.ID.HON_SC_PM,packets.ID.HON_SC_CHANNEL_MSG]
if hasattr(func, 'rule'):
if isinstance(func.rule, str):
pattern = sub(func.rule)
regexp = re.compile(pattern)
bind(self, func.priority, regexp, func)
if isinstance(func.rule, tuple):
# 1) e.g. ('$nick', '(.*)')
if len(func.rule) == 2 and isinstance(func.rule[0], str):
prefix, pattern = func.rule
prefix = sub(prefix)
regexp = re.compile(prefix + pattern)
bind(self, func.priority, regexp, func)
# 2) e.g. (['p', 'q'], '(.*)')
elif len(func.rule) == 2 and isinstance(func.rule[0], list):
prefix = self.config.prefix
commands, pattern = func.rule
for command in commands:
command = r'(%s)\b(?: +(?:%s))?' % (command, pattern)
regexp = re.compile(prefix + command)
bind(self, func.priority, regexp, func)
# 3) e.g. ('$nick', ['p', 'q'], '(.*)')
elif len(func.rule) == 3:
prefix, commands, pattern = func.rule
prefix = sub(prefix)
for command in commands:
command = r'(%s) +' % command
regexp = re.compile(prefix + command + pattern)
bind(self, func.priority, regexp, func)
if hasattr(func, 'commands'):
for command in func.commands:
template = r'^%s(%s)(?: +(.*))?$'
pattern = template % (self.config.prefix, command)
regexp = re.compile(pattern)
bind(self, func.priority, regexp, func)
if not hasattr(func,'commands') and not hasattr(func,'rule'):
bind(self,func.priority,None,func)
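    # Illustrative sketch (not part of the bot): the kind of module-level
    # function that bind_commands() picks up. Attribute names mirror the ones
    # checked above; the function bodies and command names are hypothetical.
    #
    #   def ping(phenny, input):
    #       """Reply with pong."""
    #       phenny.reply('pong')
    #   ping.commands = ['ping']          # bound as ^<prefix>(ping)(?: +(.*))?$
    #   ping.priority = 'medium'
    #   ping.example = '$nickname: ping'
    #
    #   def greet(phenny, input):
    #       phenny.say('hello ' + input.group(2))
    #   greet.rule = ('$nick', ['hi', 'hello'], r'(\S+)')   # form 3) above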
def wrapped(self, origin, input, data, match):
class PhennyWrapper(object):
def __init__(self, phenny):
self.bot = phenny
def send_msg(self,input,origin):
pass
def __getattr__(self, attr):
#sender = origin.sender or text
#if attr == 'reply':
#return (lambda msg:
#self.bot.msg(sender, origin.nick + ': ' + msg))
#elif attr == 'say':
#return lambda msg: self.bot.msg(sender, msg)
if attr in ['reply','say']:
#emote instead of channel message
if origin[0] == packets.ID.HON_SC_CHANNEL_MSG:
origin[0] = packets.ID.HON_SC_CHANNEL_EMOTE
if origin[0] in [packets.ID.HON_SC_CHANNEL_MSG,packets.ID.HON_SC_CHANNEL_EMOTE]:
#prevent channel overspam
t = time.time()
if not input.admin and input.nick not in self.bot.config.channel_whitelist:
if origin[2] not in self.bot.channel_cooldowns or \
( origin[2] in self.bot.channel_cooldowns and \
t - self.bot.channel_cooldowns[origin[2]]\
>= self.bot.config.channel_cooldown):
self.bot.channel_cooldowns[origin[2]] = t
else:
origin[0] = packets.ID.HON_SC_WHISPER
origin[1] = input.nick
prefix = ''
if origin[0] == packets.ID.HON_SC_CHANNEL_EMOTE:
prefix = self.config.replyprefix
if attr == 'reply':
if origin[0] in [packets.ID.HON_SC_CHANNEL_MSG,packets.ID.HON_SC_CHANNEL_EMOTE]:
return (lambda msg:
self.bot.write_packet(origin[0], prefix + self.id2nick[origin[1]] + ': ' + msg,
origin[2]))
else:
return (lambda msg:
self.bot.write_packet(origin[0],origin[1], prefix + msg))
elif attr == 'say':
if origin[0] in [packets.ID.HON_SC_CHANNEL_MSG,packets.ID.HON_SC_CHANNEL_EMOTE]:
return (lambda msg:
self.bot.write_packet(origin[0], prefix + msg,origin[2]))
else:
return (lambda msg:
self.bot.write_packet(origin[0],origin[1],msg))
return getattr(self.bot, attr)
return PhennyWrapper(self)
def call(self, func, origin, phenny, *input):
try:
if hasattr(self.config, 'bad_commands') and \
hasattr(func, 'commands') and \
any(cmd for cmd in func.commands if cmd in self.config.bad_commands) and \
hasattr(input, 'owner') and not input.owner:
return
if hasattr(self.config, 'clan_use') and \
hasattr(input, 'account_id') and \
self.config.clan_use and \
input.account_id not in self.clan_roster and \
not input.admin:
return
if func(phenny, *input) is False:
self.noauth(*input)
except Exception, e:
self.error(origin)
def input(self, origin, text, data, match):
class CommandInput(unicode):
def __new__(cls, text, origin, data, match):
s = unicode.__new__(cls, text)
s.origin = origin
#s.sender = origin.sender
#s.nick = origin.nick
s.data = data
s.match = match
s.group = match.group
s.groups = match.groups
if isinstance(origin[1],unicode):
origin[1] = normalize_nick(origin[1])
s.nick = origin[1]
try:
s.account_id = self.nick2id[s.nick.lower()]
except:
s.account_id = -1
elif isinstance(origin[1],int):
s.account_id = origin[1]
try:
s.nick = self.id2nick[origin[1]]
except:
s.nick = ''
else:
s.nick = None
s.account_id = None
if isinstance( self.config.owner, list ):
s.owner = s.nick.lower() in [o.lower() for o in self.config.owner]
else:
s.owner = s.nick.lower() == self.config.owner.lower()
s.admin = s.owner or s.nick.lower() in self.config.admins
s.admin = s.admin or hasattr(self.config,'clan_admin') and self.config.clan_admin and s.account_id in self.clan_roster
if not s.admin and hasattr(self.config,'officer_admin') and \
self.config.officer_admin and s.account_id is not None and\
s.account_id in self.clan_roster and\
self.clan_roster[s.account_id]['rank'] != 'Member':
s.admin = True
return s
return CommandInput(text, origin, data, match)
def dispatch(self,data):
self.connection_timeout = time.time()
origin,data = packets.parse_packet(data)
packet_id = origin[0]
for priority in ('high', 'medium', 'low'):
items = self.commands[priority].items()
for regexp, funcs in items:
for func in funcs:
if packet_id not in func.event: continue
if regexp is None:
if func.thread:
targs = (func, list(origin), self,list(origin), data)
t = threading.Thread(target=self.call, args=targs)
t.start()
else: self.call(func, list(origin), self, list(origin),data)
elif isinstance(data,unicode):
text = data
match = regexp.match(text)
if match:
input = self.input(list(origin), text, data, match)
if input.nick.lower() in self.config.ignore:
continue
phenny = self.wrapped(list(origin), input, text, match)
t = time.time()
if input.admin or input.nick not in self.cooldowns or\
(input.nick in self.cooldowns \
and \
t - self.cooldowns[input.nick]\
>= self.config.cooldown):
self.cooldowns[input.nick] = t
if func.thread:
targs = (func, list(origin), phenny, input)
t = threading.Thread(target=self.call, args=targs)
t.start()
else: self.call(func, list(origin), phenny, input)
def noauth(self, input):
self.write_packet(packets.ID.HON_SC_WHISPER, input.nick, 'You do not have access to this command.')
return False
def log( self, msg ):
print( "[{0}] {1}".format( time.strftime("%H:%M"), msg ) )
|
HRM.py
|
# Copyright 2021 by RaithSphere
# With thanks to Ryuvi
# All rights reserved.
# This file is part of the NeosVR-HRM,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
import argparse
import configparser
import logging
import math
import os
import socket
import statistics
import sys
import threading
import time
from sys import platform
import pexpect
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
# For windows we are using BleakClient
if platform == "win32" or platform == "win64":
from bleak import BleakClient
from bleak import discover
import asyncio
HR_UUID = "00002a37-0000-1000-8000-00805f9b34fb"
BT_UUID = "00002a19-0000-1000-8000-00805f9b34fb"
datafile = open("../storage/hrbt.txt", "w")
logging.basicConfig(format="%(asctime)-15s %(message)s")
log = logging.getLogger("HeartRateLogger")
FinalSamples = 24
HR = -1
HRV = 0
RRAvg = [0 for i in range(FinalSamples)]
bt = -1
ct = False
connected = False
TwentyfourBeatAvg = [0 for i in range(FinalSamples * 2)]
log.setLevel(logging.INFO)
log.info("Starting Script")
class SimpleEcho(WebSocket):
def handleMessage(self):
hrbt = open("../storage/hrbt.txt", "r")
data = hrbt.read()
self.sendMessage(data)
hrbt.close()
def handleConnected(self):
print(self.address, "connected")
def handleClose(self):
print(self.address, "closed")
def parse_args():
"""
Command line argument parsing
"""
parser = argparse.ArgumentParser(
description="Bluetooth heart rate monitor data logger"
)
parser.add_argument(
"-mac",
metavar="MAC",
type=str,
help="MAC address of BLE device (default: auto-discovery)",
)
parser.add_argument("-battery", action="store_true", help="Check battery level")
parser.add_argument(
"-g",
metavar="PATH",
type=str,
help="gatttool path (default: system available)",
default="gatttool",
)
parser.add_argument(
"-H",
metavar="HR_HANDLE",
type=str,
help="Gatttool handle used for HR notifications (default: none)",
)
parser.add_argument("-v", action="store_true", help="Verbose output")
parser.add_argument("-d", action="store_true", help="Enable debug of gatttool")
    parser.add_argument("-port", metavar="PORT", type=int, help="Set the websocket server port")
parser.add_argument(
"-s", action="store_true", help="Scan for bluetooth devices - Windows only"
)
parser.add_argument(
"-a", action="store_true", help="Get List of services - Windows Only"
)
confpath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Config.conf")
if os.path.exists(confpath):
config = configparser.ConfigParser()
config.read([confpath])
config = dict(config.items("config"))
# We compare here the configuration given in the config file with the
# configuration of the parser.
arguments = vars(parser.parse_args([]))
err = False
for key in config.keys():
if key not in arguments:
log.error("Configuration file error: invalid key '" + key + "'.")
err = True
if err:
sys.exit(1)
parser.set_defaults(**config)
return parser.parse_args()
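# Illustrative sketch: the Config.conf read above is expected to hold a single
# [config] section whose keys match the argparse destinations (mac, port,
# battery, ...). The values below are placeholders, not shipped defaults.
#
#   [config]
#   mac = 00:11:22:33:44:55
#   port = 6969
#   battery = True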
def get_ble_hr_mac():
"""
    Scans BLE devices and returns the address of the first device found.
"""
global addr
while 1:
log.info("Trying to find a BLE device")
hci = pexpect.spawn("hcitool lescan")
try:
hci.expect("([0-9A-F]{2}[:-]){5}([0-9A-F]{2})", timeout=20)
addr = hci.match.group(0)
hci.close()
break
except pexpect.TIMEOUT:
time.sleep(20)
continue
except KeyboardInterrupt:
log.info("Received keyboard interrupt. Quitting cleanly.")
hci.close()
return None
# We wait for the 'hcitool lescan' to finish
time.sleep(1)
return addr
def cli():
"""
Entry point for the command line interface
"""
log.info("Starting CLI Thread")
if platform == "linux" or platform == "linux2":
log.info("Detected Platform Linux")
main_linux(args.mac, args.g, args.battery, args.H, args.d)
def connect(windows):
connected = True
windows.run_until_complete(main_windows(args.mac))
async def main_windows(address=None):
async with BleakClient(address) as client:
log.info("Connected, streaming data...")
await client.start_notify(HR_UUID, processhr)
while True:
global bt, ct
bt = int.from_bytes(await client.read_gatt_char(BT_UUID), byteorder="big")
writeout(None, None, bt, ct)
await asyncio.sleep(1.0)
def processhr(s, d):
byte0 = d[0]
res = {"hrv_uint8": (byte0 & 1) == 0}
sensor_contact = (byte0 >> 1) & 3
global ct
if sensor_contact == 2:
res["sensor_contact"] = "No contact detected"
ct = False
elif sensor_contact == 3:
res["sensor_contact"] = "Contact detected"
ct = True
else:
res["sensor_contact"] = "Sensor contact not supported"
res["ee_status"] = ((byte0 >> 3) & 1) == 1
res["rr_interval"] = ((byte0 >> 4) & 1) == 1
if res["hrv_uint8"]:
res["hr"] = d[1]
i = 2
else:
res["hr"] = (d[2] << 8) | d[1]
i = 3
if res["ee_status"]:
res["ee"] = (d[i + 1] << 8) | d[i]
i += 2
if res["rr_interval"]:
res["rr"] = []
while i < len(d):
# Note: Need to divide the value by 1024 to get in seconds
res["rr"].append((d[i + 1] << 8) | d[i])
i += 2
global HRV
if res["rr_interval"]:
for i in res["rr"]:
TwentyfourBeatAvg.insert(0, i)
del TwentyfourBeatAvg[-1]
global RRAvg
for i in range(FinalSamples):
n = i * 2
nextn = (
TwentyfourBeatAvg[n + 1]
if TwentyfourBeatAvg[n + 1] != 0
else TwentyfourBeatAvg[n]
)
RRAvg[i] = pow(TwentyfourBeatAvg[n] - nextn, 2)
HRV = math.sqrt(statistics.mean(RRAvg))
writeout(res["hr"], HRV, None, None)
def main_linux(
addr=None,
gatttool="gatttool",
check_battery=False,
hr_handle=None,
debug_gatttool=False,
):
"""
    main routine which orchestrates everything
"""
hr_ctl_handle = None
retry = True
global ct, bt, gt
while retry:
while 1:
log.info("Establishing connection to " + addr)
gt = pexpect.spawn(gatttool + " -b " + addr + " -t random --interactive")
if debug_gatttool:
gt.logfile = sys.stdout
gt.expect(r"\[LE\]>")
gt.sendline("connect")
try:
i = gt.expect(["Connection successful.", r"\[CON\]"], timeout=30)
if i == 0:
gt.expect(r"\[LE\]>", timeout=30)
except pexpect.TIMEOUT:
log.info("Connection timeout. Retrying.")
continue
except KeyboardInterrupt:
log.info("Received keyboard interrupt. Quitting cleanly.")
retry = False
break
break
if not retry:
break
log.info("Connected to " + addr)
if check_battery:
gt.sendline("char-read-uuid 00002a19-0000-1000-8000-00805f9b34fb")
try:
gt.expect("value: ([0-9a-f]+)")
battery_level = gt.match.group(1)
log.info("Battery level: " + str(int(battery_level, 16)))
bt = str(int(battery_level, 16))
except pexpect.TIMEOUT:
log.error("Couldn't read battery level.")
if hr_handle is None:
# We determine which handle we should read for getting the heart rate
# measurement characteristic.
gt.sendline("char-desc")
while 1:
try:
gt.expect(
r"handle: (0x[0-9a-f]+), uuid: ([0-9a-f]{8})", timeout=60
) # Had to increase the timeout from 10 for Wahoo Tickr X
except pexpect.TIMEOUT:
break
handle = gt.match.group(1).decode()
uuid = gt.match.group(2).decode()
if uuid == "00002902" and hr_handle:
log.debug("Scanning 00002902 for hr_ctl_handle")
hr_ctl_handle = handle
break
elif uuid == "00002a37":
log.debug("Scanning 00002a37 for hr_handle")
hr_handle = handle
if hr_handle is None:
log.error("Couldn't find the heart rate measurement handle?!")
return
if hr_ctl_handle:
# We send the request to get HRM notifications
log.info("Starting Heart Data Collection Process")
gt.sendline("char-write-req " + hr_ctl_handle + " 0100")
# Time period between two measures. This will be updated automatically.
period = 1.0
last_measure = time.time() - period
hr_expect = "Notification handle = " + hr_handle + " value: ([0-9a-f ]+)"
while 1:
try:
gt.expect(hr_expect, timeout=10)
except pexpect.TIMEOUT:
# If the timer expires, it means that we have lost the
# connection with the HR monitor
log.warning("Connection lost with " + addr + ". Reconnecting.")
writeout(0, 0, 0, 0)
time.sleep(1)
break
except KeyboardInterrupt:
writeout(0, 0, 0, 0)
log.info("Received keyboard interrupt. Quitting cleanly.")
retry = False
clithread.join()
break
# We measure here the time between two measures. As the sensor
# sometimes sends a small burst, we have a simple low-pass filter
# to smooth the measure.
tmeasure = time.time()
period = period + 1 / 16.0 * ((tmeasure - last_measure) - period)
last_measure = tmeasure
# Get data from gatttool
datahex = gt.match.group(1).strip()
data = map(lambda x: int(x, 16), datahex.split(b" "))
res = interpret(list(data))
log.debug(res)
writeout(None, None, bt, ct)
        # We quit and close the BLE connection properly
gt.sendline("quit")
try:
gt.wait()
except:
pass
def interpret(data):
"""
data is a list of integers corresponding to readings from the BLE HR monitor
"""
byte0 = data[0]
res = {"hrv_uint8": (byte0 & 1) == 0}
sensor_contact = (byte0 >> 1) & 3
global ct
if sensor_contact == 2:
res["sensor_contact"] = "No contact detected"
ct = False
elif sensor_contact == 3:
res["sensor_contact"] = "Contact detected"
ct = True
else:
res["sensor_contact"] = "Sensor contact not supported"
res["ee_status"] = ((byte0 >> 3) & 1) == 1
res["rr_interval"] = ((byte0 >> 4) & 1) == 1
if res["hrv_uint8"]:
res["hr"] = data[1]
i = 2
else:
res["hr"] = (data[2] << 8) | data[1]
i = 3
if res["ee_status"]:
res["ee"] = (data[i + 1] << 8) | data[i]
i += 2
if res["rr_interval"]:
res["rr"] = []
while i < len(data):
# Note: Need to divide the value by 1024 to get in seconds
res["rr"].append((data[i + 1] << 8) | data[i])
i += 2
global HRV
if res["rr_interval"]:
for i in res["rr"]:
TwentyfourBeatAvg.insert(0, i)
del TwentyfourBeatAvg[-1]
global RRAvg
for i in range(FinalSamples):
n = i * 2
nextn = (
TwentyfourBeatAvg[n + 1]
if TwentyfourBeatAvg[n + 1] != 0
else TwentyfourBeatAvg[n]
)
RRAvg[i] = pow(TwentyfourBeatAvg[n] - nextn, 2)
HRV = math.sqrt(statistics.mean(RRAvg))
writeout(res["hr"], HRV, None, None)
return res
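# Illustrative sketch: decoding one Heart Rate Measurement notification with
# interpret(). The payload bytes are made up for the example.
#
#   sample = [0x16, 0x48, 0x34, 0x02, 0x3A, 0x02]
#   # flags 0x16 -> uint8 HR, contact detected, no energy field, RR present
#   res = interpret(sample)
#   # res["hr"] == 72
#   # res["rr"] == [564, 570]  (divide by 1024 for seconds: ~0.551 s, ~0.557 s)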
def writeout(hr, hrv, battery, contact):
if hr is None and hrv is None and battery is None and contact is None:
datafile.seek(0)
datafile.write(str("0000.00000000.0000.0"))
datafile.truncate()
else:
datafile.seek(13 if hr is None else 0)
datafile.write(
".{:4s}.{:1s}".format(str(battery), "1" if contact is True else "0")
if hr is None
else "{:4s}.{:8.4f}".format(str(hr), hrv)
)
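# Record layout in hrbt.txt (served verbatim to websocket clients):
# "HHHH.VVVVVVVV.BBBB.C" - heart rate (4 chars), HRV (8 chars, float),
# battery percentage (4 chars) and contact flag (1 char). HR/HRV updates
# rewrite the first 13 characters; battery/contact updates rewrite the rest.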
async def searchbt():
devices = await discover()
""" Ignore HTC or Unknown Devices """
Devicelist = ["HTC", "Unknown", "Apple", "Google"]
KnownDevices = ["808S", "Polar", "XOSS"]
for d in devices:
dx = str(d)
if not any(x in dx for x in Devicelist):
if any(x in dx for x in KnownDevices):
log.info("\033[92mPossible Tracker: %s \033[0m", dx)
else:
log.info(dx)
async def getservices(address: str):
async with BleakClient(address) as client:
x = await client.is_connected()
log.info("Connected: {0}".format(x))
for service in client.services:
log.info("[Service] {0}: {1}".format(service.uuid, service.description))
for char in service.characteristics:
if "read" in char.properties:
try:
value = bytes(await client.read_gatt_char(char.uuid))
except Exception as e:
value = str(e).encode()
else:
value = None
log.info(
"\t[Characteristic] {0}: (Handle: {1}) ({2}) | Name: {3}, Value: {4} ".format(
char.uuid,
char.handle,
",".join(char.properties),
char.description,
value,
)
)
for descriptor in char.descriptors:
value = await client.read_gatt_descriptor(descriptor.handle)
log.info(
"\t\t[Descriptor] {0}: (Handle: {1}) | Value: {2} ".format(
descriptor.uuid, descriptor.handle, bytes(value)
)
)
def http(webport):
server = SimpleWebSocketServer("", webport, SimpleEcho)
server.serveforever()
if __name__ == "__main__":
args = parse_args()
    if args.s and (platform == "win32" or platform == "win64"):
log.info("Starting bluetooth device scan")
loop = asyncio.get_event_loop()
loop.run_until_complete(searchbt())
    elif args.a and (platform == "win32" or platform == "win64"):
log.info("Getting Bluetooth Services list for %s" % args.mac)
loop = asyncio.get_event_loop()
loop.run_until_complete(getservices(args.mac))
else:
if os.path.isfile("Config.conf"):
log.info("Found config file")
else:
log.error(
"ERROR: Unable to find config file Config.conf, check the filename"
)
exit()
if args.g != "gatttool" and not os.path.exists(args.g):
log.critical("Couldn't find gatttool path!")
sys.exit(1)
# Increase verbose level
if args.v:
log.setLevel(logging.DEBUG)
log.info("Log level set to DEBUG")
else:
log.setLevel(logging.INFO)
            log.info("Log level set to INFO")
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
log.info("SimpleEcho Started ws://%s:%s" % (local_ip, args.port))
        log.info(
            "\33[1m\33[94mNotice: if you are running this locally on the same PC as NeosVR, connect to ws://localhost:%s\33[0m" % args.port)
wthread = threading.Thread(target=http, args=(args.port,), daemon=True)
wthread.start()
if platform == "darwin":
log.info("Detected Platform Darwin - Unsupported - Terminating Process")
quit()
elif platform == "win32" or platform == "win64":
log.info("Detected Platform Windows - Experimental")
log.info("Connecting to " + args.mac)
loop = asyncio.get_event_loop()
while not connected:
try:
connect(loop)
except KeyboardInterrupt:
print("Ending...")
datafile.close()
break
except Exception as e:
print(e)
elif platform == "linux" or platform == "linux2":
clithread = threading.Thread(target=cli, daemon=True)
clithread.start()
while True:
time.sleep(10)
user_input = input("[Bluetooth Control]: ")
if user_input == "quit":
log.info("Exiting HRM")
exit(0)
elif user_input == "help":
log.info("System Commands")
log.info("---------------")
log.info("Quit - Exit the program and terminate process")
log.info("Help - Shows this help ")
else:
print("This is not a correct command.")
|
monitoringservice.py
|
import datetime
import logging as log
import threading
import time
from pymongo import MongoClient
from . import discoveryconst as const
class MonitoringService:
def __init__(self):
"""
Initialize the monitoring service and all database collections.
"""
self.db_client = MongoClient()
self.dev_coll = self.db_client[const.DISCOVERY_DB_NAME][const.MONITOR_COLL_NAME]
self.connde_devices = self.db_client[const.RMP_DB_NAME][const.RMP_DEVICE_COLLECTION]
self.connde_sensors = self.db_client[const.RMP_DB_NAME][const.RMP_SENSOR_COLLECTION]
self.monitor = False # indicates whether the service is running
def start(self):
log.info('Start monitoring')
self.monitor = True
t = threading.Thread(target=self._loop_forever)
t.start()
def _loop_forever(self):
"""
In every iteration, read the list of monitored devices from the database.
Check for each device in the list,
if the time period since the last contact is greater than the specified timeout interval.
"""
while self.monitor:
cur_time = datetime.datetime.utcnow()
log.debug('Monitoring checking devices @ |%s|', str(cur_time))
dev_cursor = self.dev_coll.find({}) # load all devices
to_delete = [] # list of GLOBAL_IDs to delete
for monitored_device in dev_cursor:
if const.GLOBAL_ID not in monitored_device:
log.warning('no GLOBAL_ID specified for device |%s|', str(monitored_device))
continue
global_id = monitored_device[const.GLOBAL_ID]
if const.LAST_CONTACT not in monitored_device:
log.warning('no last contact noted for device |%s|', str(monitored_device))
continue
last_contact = monitored_device[const.LAST_CONTACT]
if const.TIMEOUT not in monitored_device:
log.warning('no timeout specified for device |%s|', str(monitored_device))
continue
timeout = datetime.timedelta(seconds=monitored_device[const.TIMEOUT])
if cur_time - last_contact > timeout:
log.info('device |%d| timed out', global_id)
to_delete.append(global_id)
log.debug('Monitoring deleting timed out devices |%s|', str(to_delete))
delete_dict = {
const.GLOBAL_ID: {'$in': to_delete}
}
self.dev_coll.delete_many(delete_dict)
self.connde_sensors.delete_many(delete_dict)
self.connde_devices.delete_many(delete_dict)
time.sleep(const.SERVER_MONITOR_SLEEP)
def stop(self):
log.info('Stop monitoring')
self.monitor = False
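# Illustrative usage sketch (not part of the module): the service is expected
# to be driven by a long-running process.
#
#   service = MonitoringService()
#   service.start()          # spawns the monitor thread
#   ...                      # devices update LAST_CONTACT elsewhere
#   service.stop()           # thread exits after the current sleep interval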
|
test_context.py
|
# -*- coding: utf-8 -*-
"""
tests.unit.context_test
~~~~~~~~~~~~~~~~~~~~
"""
# Import python libs
from __future__ import absolute_import
import threading
import time
import salt.ext.tornado.gen
import salt.ext.tornado.stack_context
# Import Salt libs
import salt.utils.json
from salt.ext.six.moves import range
from salt.ext.tornado.testing import AsyncTestCase, gen_test
from salt.utils.context import ContextDict, NamespacedDictWrapper
# Import Salt Testing libs
from tests.support.unit import TestCase
class ContextDictTests(AsyncTestCase):
# how many threads/coroutines to run at a time
num_concurrent_tasks = 5
def setUp(self):
super(ContextDictTests, self).setUp()
self.cd = ContextDict()
# set a global value
self.cd["foo"] = "global"
def test_threads(self):
"""Verify that ContextDict overrides properly within threads
"""
rets = []
def tgt(x, s):
inner_ret = []
over = self.cd.clone()
inner_ret.append(self.cd.get("foo"))
with over:
inner_ret.append(over.get("foo"))
over["foo"] = x
inner_ret.append(over.get("foo"))
time.sleep(s)
inner_ret.append(over.get("foo"))
rets.append(inner_ret)
threads = []
for x in range(0, self.num_concurrent_tasks):
s = self.num_concurrent_tasks - x
t = threading.Thread(target=tgt, args=(x, s))
t.start()
threads.append(t)
for t in threads:
t.join()
for r in rets:
self.assertEqual(r[0], r[1])
self.assertEqual(r[2], r[3])
@gen_test
def test_coroutines(self):
"""Verify that ContextDict overrides properly within coroutines
"""
@salt.ext.tornado.gen.coroutine
def secondary_coroutine(over):
raise salt.ext.tornado.gen.Return(over.get("foo"))
@salt.ext.tornado.gen.coroutine
def tgt(x, s, over):
inner_ret = []
# first grab the global
inner_ret.append(self.cd.get("foo"))
# grab the child's global (should match)
inner_ret.append(over.get("foo"))
# override the global
over["foo"] = x
inner_ret.append(over.get("foo"))
# sleep for some time to let other coroutines do this section of code
yield salt.ext.tornado.gen.sleep(s)
# get the value of the global again.
inner_ret.append(over.get("foo"))
# Call another coroutine to verify that we keep our context
r = yield secondary_coroutine(over)
inner_ret.append(r)
raise salt.ext.tornado.gen.Return(inner_ret)
futures = []
for x in range(0, self.num_concurrent_tasks):
s = self.num_concurrent_tasks - x
over = self.cd.clone()
# pylint: disable=cell-var-from-loop
f = salt.ext.tornado.stack_context.run_with_stack_context(
salt.ext.tornado.stack_context.StackContext(lambda: over),
lambda: tgt(x, s / 5.0, over),
)
# pylint: enable=cell-var-from-loop
futures.append(f)
wait_iterator = salt.ext.tornado.gen.WaitIterator(*futures)
while not wait_iterator.done():
r = yield wait_iterator.next() # pylint: disable=incompatible-py3-code
            self.assertEqual(r[0], r[1])  # verify that the global value remains
self.assertEqual(r[2], r[3]) # verify that the override sticks locally
self.assertEqual(
r[3], r[4]
) # verify that the override sticks across coroutines
def test_basic(self):
"""Test that the contextDict is a dict
"""
# ensure we get the global value
self.assertEqual(
dict(self.cd), {"foo": "global"},
)
def test_override(self):
over = self.cd.clone()
over["bar"] = "global"
self.assertEqual(
dict(over), {"foo": "global", "bar": "global"},
)
self.assertEqual(
dict(self.cd), {"foo": "global"},
)
with over:
self.assertEqual(
dict(over), {"foo": "global", "bar": "global"},
)
self.assertEqual(
dict(self.cd), {"foo": "global", "bar": "global"},
)
over["bar"] = "baz"
self.assertEqual(
dict(over), {"foo": "global", "bar": "baz"},
)
self.assertEqual(
dict(self.cd), {"foo": "global", "bar": "baz"},
)
self.assertEqual(
dict(over), {"foo": "global", "bar": "baz"},
)
self.assertEqual(
dict(self.cd), {"foo": "global"},
)
def test_multiple_contexts(self):
cds = []
for x in range(0, 10):
cds.append(self.cd.clone(bar=x))
for x, cd in enumerate(cds):
self.assertNotIn("bar", self.cd)
with cd:
self.assertEqual(
dict(self.cd), {"bar": x, "foo": "global"},
)
self.assertNotIn("bar", self.cd)
class NamespacedDictWrapperTests(TestCase):
PREFIX = "prefix"
def setUp(self):
self._dict = {}
def test_single_key(self):
self._dict["prefix"] = {"foo": "bar"}
w = NamespacedDictWrapper(self._dict, "prefix")
self.assertEqual(w["foo"], "bar")
def test_multiple_key(self):
self._dict["prefix"] = {"foo": {"bar": "baz"}}
w = NamespacedDictWrapper(self._dict, ("prefix", "foo"))
self.assertEqual(w["bar"], "baz")
def test_json_dumps_single_key(self):
self._dict["prefix"] = {"foo": {"bar": "baz"}}
w = NamespacedDictWrapper(self._dict, "prefix")
self.assertEqual(salt.utils.json.dumps(w), '{"foo": {"bar": "baz"}}')
def test_json_dumps_multiple_key(self):
self._dict["prefix"] = {"foo": {"bar": "baz"}}
w = NamespacedDictWrapper(self._dict, ("prefix", "foo"))
self.assertEqual(salt.utils.json.dumps(w), '{"bar": "baz"}')
|
display_server.py
|
import threading
import Adafruit_SSD1306
import time
import PIL.Image
import PIL.ImageFont
import PIL.ImageDraw
from flask import Flask
from .utils import ip_address, cpu_usage, memory_usage, disk_usage, temp
from pidisplay import ads1115
from pidisplay import ina219
import os
class DisplayServer(object):
def __init__(self, *args, **kwargs):
adress = os.popen("i2cdetect -y -r 1 0x48 0x48 | egrep '48' | awk '{print $2}'").read()
if(adress=='48\n'):
self.ads = ads1115.ADS1115()
else:
self.ads = None
adress = os.popen("i2cdetect -y -r 1 0x41 0x41 | egrep '41' | awk '{print $2}'").read()
if(adress=='41\n'):
self.ina219 = ina219.INA219(addr=0x41)
else:
self.ina219 = None
adress = os.popen("i2cdetect -y -r 1 0x42 0x42 | egrep '42' | awk '{print $2}'").read()
if(adress=='42\n'):
self.ina = ina219.INA219(addr=0x42)
else:
self.ina = None
self.display = Adafruit_SSD1306.SSD1306_128_32(rst=None, i2c_bus=1, gpio=1)
self.display.begin()
self.display.clear()
self.display.display()
self.font = PIL.ImageFont.load_default()
self.image = PIL.Image.new('1', (self.display.width, self.display.height))
self.draw = PIL.ImageDraw.Draw(self.image)
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
self.stats_enabled = False
self.stats_thread = None
self.stats_interval = 1.0
self.enable_stats()
def _run_display_stats(self):
Charge = False
while self.stats_enabled:
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
# set IP address
top = -2
if ip_address('eth0') is not None:
self.draw.text((4, top), 'IP: ' + str(ip_address('eth0')), font=self.font, fill=255)
elif ip_address('wlan0') is not None:
self.draw.text((4, top), 'IP: ' + str(ip_address('wlan0')), font=self.font, fill=255)
else:
                self.draw.text((4, top), 'IP: not available', font=self.font, fill=255)
top = 6
if(self.ina != None):
bus_voltage = self.ina.getBusVoltage_V() # voltage on V- (load side)
current = self.ina.getCurrent_mA() # current in mA
p = (bus_voltage - 6)/2.4*100
if(p > 100):p = 100
if(p < 0):p = 0
if(current < 0):current = 0
if(current > 30):
Charge = not Charge
else:
Charge = False
if(Charge == False):
self.draw.text((600, -2), ' ', font=self.font, fill=255)
else:
self.draw.text((120, -2), '*', font=self.font, fill=255)
self.draw.text((4, top), (" %.1fV")%bus_voltage + (" %.2fA")%(current/1000) + (" %2.0f%%")%p, font=self.font, fill=255)
elif(self.ina219 != None):
bus_voltage = self.ina219.getBusVoltage_V() # voltage on V- (load side)
current = self.ina219.getCurrent_mA() # current in mA
p = (bus_voltage - 9)/3.6*100
if(p > 100):p = 100
if(p < 0):p = 0
if(current < 0):current = 0
if(current > 30):
Charge = not Charge
else:
Charge = False
if(Charge == False):
self.draw.text((600, -2), ' ', font=self.font, fill=255)
else:
self.draw.text((120, -2), '*', font=self.font, fill=255)
self.draw.text((4, top), (" %.1fV")%bus_voltage + (" %.2fA")%(current/1000) + (" %2.0f%%")%p, font=self.font, fill=255)
elif(self.ads != None):
value=self.ads.readVoltage(4)/1000.0
p = value/12.6*100
if(p > 100):p = 100
self.draw.text((4, top), 'PWR: ' + (" %.1fV")%value + (" %2.0f%%")%p, font=self.font, fill=255)
else:
self.draw.text((4, top), 'PWR No Detection' , font=self.font, fill=255)
# set stats headers
top = 14
offset = 4 * 8
headers = ['CPU', 'RAM', 'DSK', 'TMP']
for i, header in enumerate(headers):
self.draw.text((i * offset + 4, top), header, font=self.font, fill=255)
# set stats fields
top = 22
cpu_percent = '%2d%%' % int(round(cpu_usage() * 100.0, 1))
ram_percent = '%2d%%' % int(round(memory_usage() * 100.0, 1))
disk_percent = '%2d%%' % int(round(disk_usage() * 100.0, 1))
temp_percent = '%2d' % int(round(temp(), 1))
entries = [cpu_percent, ram_percent, disk_percent, temp_percent]
for i, entry in enumerate(entries):
self.draw.text((i * offset + 4, top), entry, font=self.font, fill=255)
self.display.image(self.image)
self.display.display()
time.sleep(self.stats_interval)
def enable_stats(self):
# start stats display thread
if not self.stats_enabled:
self.stats_enabled = True
self.stats_thread = threading.Thread(target=self._run_display_stats)
self.stats_thread.start()
def disable_stats(self):
self.stats_enabled = False
if self.stats_thread is not None:
self.stats_thread.join()
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
self.display.image(self.image)
self.display.display()
def set_text(self, text):
self.disable_stats()
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
lines = text.split('\n')
top = 2
for line in lines:
self.draw.text((4, top), line, font=self.font, fill=255)
top += 10
self.display.image(self.image)
self.display.display()
server = DisplayServer()
app = Flask(__name__)
@app.route('/stats/on')
def enable_stats():
global server
server.enable_stats()
return "stats enabled"
@app.route('/stats/off')
def disable_stats():
global server
server.disable_stats()
return "stats disabled"
@app.route('/text/<text>')
def set_text(text):
global server
server.set_text(text)
return 'set text: \n\n%s' % text
if __name__ == '__main__':
app.run(host='0.0.0.0', port='8000', debug=False)
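# Illustrative usage sketch: with the server running on port 8000, the display
# can be driven over HTTP (host placeholder below assumes the defaults above).
#
#   curl http://<pi-address>:8000/stats/off
#   curl http://<pi-address>:8000/text/hello%0Aworld   # '%0A' starts a new line
#   curl http://<pi-address>:8000/stats/on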
|
listen.py
|
# -*- coding: utf-8 -*-
import socket
import logging
from errno import ENOPROTOOPT
import time
import threading
from .upnp_class import UPNPObject
SSDP_PORT = 1900
SSDP_ADDR = '239.255.255.250'
def listen(timeout, log_level=None):
logger = logging.getLogger('UPNP_Devices')
logger.setLevel(logging.NOTSET)
if log_level is not None:
logger.setLevel(log_level)
threads = []
event = threading.Event()
found = []
found_event = threading.Event()
def do(lcl_address):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except socket.error as err:
# RHEL6 defines SO_REUSEPORT but it doesn't work
if err.errno == ENOPROTOOPT:
pass
else:
raise err
addr = socket.inet_aton(SSDP_ADDR)
interface = socket.inet_aton(lcl_address)
cmd = socket.IP_ADD_MEMBERSHIP
sock.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
sock.bind((lcl_address, SSDP_PORT))
sock.settimeout(1)
logger.debug('SSDP bound on address ' + lcl_address)
start = time.time()
while time.time() - start < timeout:
try:
data, addr = sock.recvfrom(1024)
if data:
host, port = addr
logger.debug('SSDP data: %s --> %s', host, data)
try:
header, payload = data.decode().split('\r\n\r\n')[:2]
except ValueError as err:
logger.error(err)
continue
lines = header.split('\r\n')
cmd = lines[0].split(' ')
lines = map(lambda x: x.replace(': ', ':', 1), lines[1:])
lines = filter(lambda x: len(x) > 0, lines)
headers = [x.split(':', 1) for x in lines]
headers = dict(map(lambda x: (x[0].lower(), x[1]), headers))
logger.debug('SSDP command %s %s - from %s:%d', cmd[0], cmd[1], host, port)
logger.debug('with headers: %s.', headers)
if cmd[0] == 'M-SEARCH' and cmd[1] == '*':
logger.debug('M-SEARCH *')
elif cmd[0] == 'NOTIFY' and cmd[1] == '*':
addr = addr[0]
start = data.lower().find(b'nt:')
if start > -1:
start += 3
nt = data[start:]
nt = nt[:nt.find(b'\n')].strip()
else:
continue
logger.debug('SSDP: %s found nt: %s', addr, nt)
if nt != b'upnp:rootdevice':
continue
start = data.lower().find(b'st:')
if start > -1:
start += 3
st = data[start:]
st = st[:st.find(b'\n')].strip()
else:
continue
start = data.lower().find(b'location:')
if start > -1:
start += 9
location = data[start:]
location = location[:location.find(b'\n')].strip()
else:
continue
logger.debug('SSDP: %s found st: %s', addr, st)
logger.debug('SSDP: %s found location: %s', addr, location)
found.append(UPNPObject(addr, {st: location}))
found_event.set()
else:
logger.debug('Unknown SSDP command %s %s', cmd[0], cmd[1])
except socket.timeout:
continue
try:
sock.close()
except socket.error:
pass
found_event.set()
threads.remove(threading.current_thread())
if not threads:
event.set()
found_event.set()
for local_address in get_local_addresses():
t = threading.Thread(target=do, args=(local_address,))
t.daemon = True
threads += [t]
t.start()
while not event.isSet():
found_event.wait()
found_event.clear()
while found:
yield found.pop(0)
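# Illustrative usage sketch: listen() is a generator that yields UPNPObject
# instances as NOTIFY announcements arrive on any local interface, until the
# timeout expires.
#
#   for device in listen(timeout=30, log_level=logging.DEBUG):
#       print(device)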
|
service_async.py
|
# Copyright (c) 2018 http://reportportal.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import threading
from six.moves import queue
from .errors import Error
from .service import ReportPortalService
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class QueueListener(object):
_sentinel_item = None
def __init__(self, queue, *handlers, **kwargs):
self.queue = queue
self.queue_get_timeout = kwargs.get("queue_get_timeout", None)
self.handlers = handlers
self._stop_nowait = threading.Event()
self._stop = threading.Event()
self._thread = None
def dequeue(self, block=True):
"""Dequeue a record and return item."""
return self.queue.get(block, self.queue_get_timeout)
def start(self):
"""Start the listener.
This starts up a background thread to monitor the queue for
items to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
def prepare(self, record):
"""Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""Handle an item.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler(record)
def _monitor(self):
"""Monitor the queue for items, and ask the handler to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
err_msg = ("invalid internal state:"
" _stop_nowait can not be set if _stop is not set")
assert self._stop.isSet() or not self._stop_nowait.isSet(), err_msg
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel_item:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue,
        # handle them unless _stop_nowait is set.
while not self._stop_nowait.isSet():
try:
record = self.dequeue(False)
if record is self._sentinel_item:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def stop(self, nowait=False):
"""Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
        If nowait is False, the thread will handle the remaining items in the
        queue and then stop.
        If nowait is True, the thread will be stopped even if the queue still
        contains items.
"""
self._stop.set()
if nowait:
self._stop_nowait.set()
self.queue.put_nowait(self._sentinel_item)
if (self._thread.isAlive() and
self._thread is not threading.currentThread()):
self._thread.join()
self._thread = None
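# Illustrative sketch (not used by the service below): QueueListener simply
# drains a queue on a daemon thread and hands each item to its handlers.
#
#   q = queue.Queue()
#   listener = QueueListener(q, print, queue_get_timeout=1)
#   listener.start()
#   q.put_nowait("hello")
#   listener.stop()          # processes remaining items, then joins the thread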
class ReportPortalServiceAsync(object):
"""Wrapper around service class to transparently provide async operations
to agents.
"""
def __init__(self, endpoint, project, token, api_base="api/v1",
error_handler=None, log_batch_size=20,
is_skipped_an_issue=True,
verify_ssl=True, queue_get_timeout=5):
"""Init the service class.
Args:
endpoint: endpoint of report portal service.
project: project name to use for launch names.
token: authorization token.
api_base: defaults to api/v1, can be changed to other version.
error_handler: function to be called to handle errors occurred
during items processing (in thread)
is_skipped_an_issue: option to mark skipped tests as not
'To Investigate' items on Server side.
verify_ssl: option to not verify ssl certificates
"""
super(ReportPortalServiceAsync, self).__init__()
self.error_handler = error_handler
self.log_batch_size = log_batch_size
self.rp_client = ReportPortalService(
endpoint, project, token,
api_base,
is_skipped_an_issue,
verify_ssl)
self.log_batch = []
self.supported_methods = ["start_launch", "finish_launch",
"start_test_item", "finish_test_item", "log"]
self.queue = queue.Queue()
self.listener = QueueListener(self.queue, self.process_item,
queue_get_timeout=queue_get_timeout)
self.listener.start()
self.lock = threading.Lock()
def terminate(self, nowait=False):
"""Finalize and stop service
Args:
nowait: set to True to terminate immediately and skip processing
messages still in the queue
"""
with self.lock:
if not self.listener:
logger.warning("Service already stopped.")
return
self.listener.stop(nowait)
try:
if not nowait:
self._post_log_batch()
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
raise
finally:
self.queue = None
self.listener = None
def _post_log_batch(self):
if self.log_batch:
try:
self.rp_client.log_batch(self.log_batch)
finally:
self.log_batch = []
def process_log(self, **log_item):
"""Special handler for log messages.
Accumulate incoming log messages and post them in batch.
"""
self.log_batch.append(log_item)
if len(self.log_batch) >= self.log_batch_size:
self._post_log_batch()
def process_item(self, item):
"""Main item handler.
Called by queue listener.
"""
method, kwargs = item
if method not in self.supported_methods:
raise Error("Not expected service method: {}".format(method))
try:
if method == "log":
self.process_log(**kwargs)
else:
self._post_log_batch()
getattr(self.rp_client, method)(**kwargs)
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
self.terminate(nowait=True)
raise
def start_launch(self, name, start_time, description=None, tags=None,
mode=None):
args = {
"name": name,
"description": description,
"tags": tags,
"start_time": start_time,
"mode": mode
}
self.queue.put_nowait(("start_launch", args))
def finish_launch(self, end_time, status=None):
args = {
"end_time": end_time,
"status": status
}
self.queue.put_nowait(("finish_launch", args))
def stop_launch(self, end_time, status=None):
args = {
"end_time": end_time,
"status": status
}
self.queue.put_nowait(("stop_launch", args))
def start_test_item(self, name, start_time, item_type, description=None,
tags=None, parameters=None):
args = {
"name": name,
"description": description,
"tags": tags,
"start_time": start_time,
"item_type": item_type,
"parameters": parameters,
}
self.queue.put_nowait(("start_test_item", args))
def finish_test_item(self, end_time, status, issue=None):
args = {
"end_time": end_time,
"status": status,
"issue": issue,
}
self.queue.put_nowait(("finish_test_item", args))
def log(self, time, message, level=None, attachment=None):
"""Logs a message with attachment.
The attachment is a dict of:
name: name of attachment
data: file content
mime: content type for attachment
"""
args = {
"time": time,
"message": message,
"level": level,
"attachment": attachment,
}
self.queue.put_nowait(("log", args))
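# Illustrative usage sketch: every call below is queued and executed on the
# listener thread; the endpoint, project, token and timestamps are placeholders.
#
#   service = ReportPortalServiceAsync("https://rp.example.com", "my_project",
#                                      "my-token")
#   service.start_launch(name="smoke", start_time=str(int(time.time() * 1000)))
#   ...
#   service.finish_launch(end_time=str(int(time.time() * 1000)))
#   service.terminate()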
|
aws_backend.py
|
"""AWS implementation of backend.py
Not thread-safe
"""
import glob
import os
import pprint
import shlex
import signal
import stat
import threading
import time
from typing import Tuple, List
import paramiko
from ncluster import ncluster_globals
from . import aws_create_resources as create_lib
from . import aws_util as u
from . import backend
from . import util
TMPDIR = '/tmp/ncluster' # location for temp files on launching machine
AWS_LOCK_FN = '/tmp/aws.lock' # lock file used to prevent concurrent creation of AWS resources by multiple workers in parallel
NCLUSTER_DEFAULT_REGION = 'us-east-1' # used as last resort if no other method set a region
LOGDIR_ROOT = '/ncluster/runs'
# some image which is fast to load, to use for quick runs
GENERIC_SMALL_IMAGE = 'amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2'
class Task(backend.Task):
"""AWS task is initialized with an AWS instance and handles initialization,
creation of SSH session, shutdown"""
last_status: int # status of last command executed
tmux_window_id: int
tmux_available_window_ids: List[int]
sftp: paramiko.SFTPClient
def __init__(self, name, *, instance, install_script='', image_name='',
**extra_kwargs):
"""
Initializes Task on top of existing AWS instance. Blocks until instance is ready to execute
shell commands.
Args:
name: task name
instance: ec2.Instance object (https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#instance)
install_script:
image_name: AWS image name
**extra_kwargs: unused kwargs (kept for compatibility with other backends)
"""
self._cmd_fn = None
self._cmd = None
self._status_fn = None # location of output of last status
self.last_status = -1
self._can_run = False # indicates that things needed for .run were created
self.initialize_called = False
self.name = name
self.instance = instance
self.install_script = install_script
self.extra_kwargs = extra_kwargs
self.public_ip = u.get_public_ip(instance)
self.ip = u.get_ip(instance)
self.sftp = None
self._linux_type = 'ubuntu'
# heuristic to tell if I'm using Amazon image name
# default image has name like 'amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2'
if 'amzn' in image_name.lower() or 'amazon' in image_name.lower():
self.log('Detected Amazon Linux image')
self._linux_type = 'amazon'
self.run_counter = 0
launch_id = util.random_id()
self.local_scratch = f"{TMPDIR}/{name}-{launch_id}"
self.remote_scratch = f"{TMPDIR}/{name}-{launch_id}"
os.system('mkdir -p ' + self.local_scratch)
self._initialized_fn = f'is_initialized'
# _current_directory tracks current directory on task machine
# used for uploading without specifying absolute path on target machine
if self._linux_type == 'ubuntu':
# self._current_directory = '/home/ubuntu'
self.ssh_username = 'ubuntu' # default username on task machine
elif self._linux_type == 'amazon':
# self._current_directory = '/home/ec2-user'
self.ssh_username = 'ec2-user'
self.homedir = '/home/' + self.ssh_username
self.ssh_client = u.ssh_to_task(self)
self._setup_tmux()
self._run_raw('mkdir -p ' + self.remote_scratch)
self._can_run = True
if self._is_initialized_fn_present():
self.log("reusing previous initialized state")
else:
self.log("running install script")
# bin/bash needed to make self-executable or use with UserData
self.install_script = '#!/bin/bash\n' + self.install_script
self.install_script += f'\necho ok > {self._initialized_fn}\n'
self.file_write('install.sh', util.shell_add_echo(self.install_script))
self.run('bash -e install.sh') # fail on errors
assert self._is_initialized_fn_present(), f"Install script didn't write to {self._initialized_fn}"
self._mount_efs()
self.connect_instructions = f"""
To connect to {self.name}
ssh -i {u.get_keypair_fn()} -o StrictHostKeyChecking=no {self.ssh_username}@{self.public_ip}
tmux a
""".strip()
self.log("Initialize complete")
self.log(self.connect_instructions)
def _is_initialized_fn_present(self):
self.log("Checking for initialization status")
try:
return 'ok' in self.read(self._initialized_fn)
except Exception:
return False
def _setup_tmux(self):
self.log("Setting up tmux")
self.tmux_session = self.name.replace('.', '=')
self.tmux_window_id = 0
self.tmux_available_window_ids = [0]
tmux_cmd = [f'tmux set-option -g history-limit 50000 \; ',
f'set-option -g mouse on \; ',
f'new-session -s {self.tmux_session} -n 0 -d']
# hack to get around Amazon linux not having tmux
if self._linux_type == 'amazon':
self._run_raw('sudo yum install tmux -y')
del tmux_cmd[1] # Amazon tmux is really old, no mouse option
if not util.is_set("NCLUSTER_NOKILL_TMUX"):
self._run_raw(f'tmux kill-session -t {self.tmux_session}',
ignore_errors=True)
else:
print(
"Warning, NCLUSTER_NOKILL_TMUX is on, make sure remote tmux prompt is available or things will hang")
self._run_raw(''.join(tmux_cmd))
self._can_run = True
def _mount_efs(self):
self.log("Mounting EFS")
region = u.get_region()
efs_id = u.get_efs_dict()[u.get_prefix()]
dns = f"{efs_id}.efs.{region}.amazonaws.com"
self.run('sudo mkdir -p /ncluster')
# ignore error on remount (efs already mounted)
self.run(
f"sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 {dns}:/ /ncluster",
ignore_errors=True)
# sometimes mount command doesn't work, make sure it's really mounted before returning
stdout, stderr = self.run_with_output('df')
while '/ncluster' not in stdout:
sleep_sec = 2
util.log(f"EFS not yet mounted, sleeping {sleep_sec} seconds")
time.sleep(sleep_sec)
self.run(
f"sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 {dns}:/ /ncluster",
ignore_errors=True)
stdout, stderr = self.run_with_output('df')
self.run('sudo chmod 777 /ncluster')
# Hack below may no longer be needed
# # make sure chmod is successful, hack to fix occasional permission errors
# while 'drwxrwxrwx' not in self.run_and_capture_output('ls -ld /ncluster'):
# print(f"chmod 777 /ncluster didn't take, retrying in {TIMEOUT_SEC}")
# time.sleep(TIMEOUT_SEC)
# self.run('sudo chmod 777 /ncluster')
# TODO(y): build a pstree and warn if trying to run something while main tmux bash has a subprocess running
# this would ensure that commands being sent are not being swallowed
def run(self, cmd, non_blocking=False, ignore_errors=False,
max_wait_sec=365 * 24 * 3600,
check_interval=0.2):
# TODO(y): make _run_with_output_on_failure default, and delete this
if util.is_set('NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True:
# experimental version that captures output and prints it on failure
# redirection things break bash commands, so
# don't redirect on bash commands like source
            # TODO(y): remove this, put in this filtering because I thought it broke
# source activate, but now it seems it doesn't
if not util.is_bash_builtin(cmd) or True:
return self._run_with_output_on_failure(cmd, non_blocking,
ignore_errors,
max_wait_sec)
else:
self.log("Found bash built-in, using regular run")
if not self._can_run:
assert False, "Using .run before initialization finished"
if '\n' in cmd:
cmds = cmd.split('\n')
self.log(
f"Running {len(cmds)} commands at once, returning status of last")
status = -1
for subcmd in cmds:
status = self.run(subcmd)
self.last_status = status
return status
cmd = cmd.strip()
if cmd.startswith('#'): # ignore empty/commented out lines
return -1
self.run_counter += 1
self.log("tmux> %s", cmd)
self._cmd = cmd
self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd'
self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status'
cmd = util.shell_strip_comment(cmd)
assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things"
# modify command to dump shell success status into file
self.file_write(self._cmd_fn, cmd + '\n')
modified_cmd = f'{cmd}; echo $? > {self._status_fn}'
modified_cmd = shlex.quote(modified_cmd)
tmux_window = self.tmux_session + ':' + str(self.tmux_window_id)
tmux_cmd = f'tmux send-keys -t {tmux_window} {modified_cmd} Enter'
self._run_raw(tmux_cmd, ignore_errors=ignore_errors)
if non_blocking:
return 0
if not self.wait_for_file(self._status_fn, max_wait_sec=30):
self.log(f"Retrying waiting for {self._status_fn}")
while not self.exists(self._status_fn):
self.log(f"Still waiting for {cmd}")
self.wait_for_file(self._status_fn, max_wait_sec=30)
contents = self.read(self._status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(self._status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
if not ignore_errors:
raise RuntimeError(f"Command {cmd} returned status {status}")
else:
self.log(f"Warning: command {cmd} returned status {status}")
return status
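    # Illustrative sketch of the protocol above: for run_counter == 7 the task
    # sends something like
    #   tmux send-keys -t <session>:0 'ls /data; echo $? > /tmp/ncluster/<name>-<id>/7.status' Enter
    # and then polls 7.status until it contains the command's exit code.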
def join(self, ignore_errors=False):
"""Waits until last executed command completed."""
assert self._status_fn, "Asked to join a task which hasn't had any commands executed on it"
check_interval = 0.2
status_fn = self._status_fn
if not self.wait_for_file(status_fn, max_wait_sec=30):
self.log(f"Retrying waiting for {status_fn}")
while not self.exists(status_fn):
self.log(f"Still waiting for {self._cmd}")
self.wait_for_file(status_fn, max_wait_sec=30)
contents = self.read(status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
if util.is_set('NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True:
self.log(
f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'")
self.log(f"\n{'*'*80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {self._cmd} returned status {status}")
else:
self.log(f"Warning: command {self._cmd} returned status {status}")
return status
def _run_with_output_on_failure(self, cmd, non_blocking=False,
ignore_errors=False,
max_wait_sec=365 * 24 * 3600,
check_interval=0.2) -> str:
"""Experimental version of run propagates error messages to client. This command will be default "run" eventually"""
if not self._can_run:
assert False, "Using .run before initialization finished"
if '\n' in cmd:
assert False, "Don't support multi-line for run2"
cmd = cmd.strip()
if cmd.startswith('#'): # ignore empty/commented out lines
return ''
self.run_counter += 1
self.log("tmux> %s", cmd)
self._cmd = cmd
self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd'
self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status'
self._out_fn = f'{self.remote_scratch}/{self.run_counter}.out'
cmd = util.shell_strip_comment(cmd)
assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things"
# modify command to dump shell success status into file
self.file_write(self._cmd_fn, cmd + '\n')
# modified_cmd = f'{cmd} > {out_fn} 2>&1; echo $? > {status_fn}'
# https://stackoverflow.com/a/692407/419116
# $cmd > >(tee -a fn) 2> >(tee -a fn >&2)
modified_cmd = f'{cmd} > >(tee -a {self._out_fn}) 2> >(tee -a {self._out_fn} >&2); echo $? > {self._status_fn}'
modified_cmd = shlex.quote(modified_cmd)
start_time = time.time()
tmux_window = self.tmux_session + ':' + str(self.tmux_window_id)
tmux_cmd = f"tmux send-keys -t {tmux_window} {modified_cmd} Enter"
self._run_raw(tmux_cmd, ignore_errors=ignore_errors)
if non_blocking:
return 0
if not self.wait_for_file(self._status_fn, max_wait_sec=60):
self.log(f"Retrying waiting for {self._status_fn}")
elapsed_time = time.time() - start_time
while not self.exists(self._status_fn) and elapsed_time < max_wait_sec:
self.log(f"Still waiting for {cmd}")
self.wait_for_file(self._status_fn, max_wait_sec=60)
elapsed_time = time.time() - start_time
contents = self.read(self._status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(self._status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
self.log(
f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'")
self.log(f"\n{'*'*80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {cmd} returned status {status}")
else:
self.log(f"Warning: command {cmd} returned status {status}")
return self.read(self._out_fn)
def _run_raw(self, cmd: str, ignore_errors=False) -> Tuple[str, str]:
"""Runs given cmd in the task using current SSH session, returns
stdout/stderr as strings. Because it blocks until cmd is done, use it for
        short cmds. Failing commands are logged and raise unless ignore_errors
        is set.
        This is a barebones method meant for use during initialization; it has
        minimal dependencies (no tmux).
"""
# self._log("run_ssh: %s"%(cmd,))
stdin, stdout, stderr = u.call_with_retries(self.ssh_client.exec_command,
command=cmd, get_pty=True)
stdout_str = stdout.read().decode()
stderr_str = stderr.read().decode()
if stdout.channel.recv_exit_status() != 0:
if not ignore_errors:
self.log(f"command ({cmd}) failed with --->")
self.log("failing stdout: " + stdout_str)
self.log("failing stderr: " + stderr_str)
assert False, "_run_raw failed (see logs for error)"
return stdout_str, stderr_str
def upload(self, local_fn: str, remote_fn: str = '',
dont_overwrite: bool = False) -> None:
"""Uploads file to remote instance. If location not specified, dumps it
into default directory. If remote location has files or directories with the
same name, behavior is undefined."""
# support wildcard through glob
if '*' in local_fn:
for local_subfn in glob.glob(local_fn):
self.upload(local_subfn)
return
if '#' in local_fn: # hashes also give problems from shell commands
            self.log(f"skipping backup file {local_fn}")
return
if not self.sftp:
self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
'self.ssh_client.open_sftp')
def maybe_fix_mode(local_fn_, remote_fn_):
"""Makes remote file execute for locally executable files"""
mode = oct(os.stat(local_fn_)[stat.ST_MODE])[-3:]
if '7' in mode:
self.log(f"Making {remote_fn_} executable with mode {mode}")
# use raw run, in case tmux is unavailable
self._run_raw(f"chmod {mode} {remote_fn_}")
# augmented SFTP client that can transfer directories, from
# https://stackoverflow.com/a/19974994/419116
def _put_dir(source, target):
""" Uploads the contents of the source directory to the target path."""
            def _safe_mkdir(path, mode=511, ignore_existing=True):
                """ Augments mkdir by adding an option to not fail if the folder already exists."""
try:
self.sftp.mkdir(path, mode)
except IOError:
if ignore_existing:
pass
else:
raise
assert os.path.isdir(source)
_safe_mkdir(target)
for item in os.listdir(source):
if os.path.isfile(os.path.join(source, item)):
self.sftp.put(os.path.join(source, item), os.path.join(target, item))
maybe_fix_mode(os.path.join(source, item), os.path.join(target, item))
else:
_safe_mkdir(f'{target}/{item}')
_put_dir(f'{source}/{item}', f'{target}/{item}')
if not remote_fn:
remote_fn = os.path.basename(local_fn)
self.log('uploading ' + local_fn + ' to ' + remote_fn)
remote_fn = remote_fn.replace('~', self.homedir)
if '/' in remote_fn:
remote_dir = os.path.dirname(remote_fn)
assert self.exists(
remote_dir), f"Remote dir {remote_dir} doesn't exist"
if dont_overwrite and self.exists(remote_fn):
self.log("Remote file %s exists, skipping" % (remote_fn,))
return
assert os.path.exists(local_fn), f"{local_fn} not found"
if os.path.isdir(local_fn):
_put_dir(local_fn, remote_fn)
else:
assert os.path.isfile(local_fn), "%s is not a file" % (local_fn,)
# this crashes with IOError when upload failed
if self.exists(remote_fn) and self.isdir(remote_fn):
remote_fn = remote_fn + '/' + os.path.basename(local_fn)
self.sftp.put(localpath=local_fn, remotepath=remote_fn)
maybe_fix_mode(local_fn, remote_fn)
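# Illustrative aside (not part of the original module): a hedged sketch of how
# upload() is typically called, assuming a `task` object returned by make_task().
# Wildcards are expanded locally via glob, and directories are walked recursively
# by the _put_dir helper above.
#
#   task.upload('setup.sh')          # lands in the default remote directory
#   task.upload('configs/*.yaml')    # glob expansion, one transfer per match
#   task.upload('data', '~/data')    # recursive directory upload, ~ expanded to homedir
#
# Locally executable files keep their mode bits remotely via maybe_fix_mode().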
def download(self, remote_fn, local_fn=''):
self.log("downloading %s" % remote_fn)
# sometimes open_sftp fails with Administratively prohibited, do retries
# root cause could be too many SSH connections being open
# https://unix.stackexchange.com/questions/14160/ssh-tunneling-error-channel-1-open-failed-administratively-prohibited-open
if not self.sftp:
self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
'self.ssh_client.open_sftp')
if not local_fn:
local_fn = os.path.basename(remote_fn)
self.log("downloading %s to %s" % (remote_fn, local_fn))
self.sftp.get(remote_fn, local_fn)
def exists(self, remote_fn):
stdout, stderr = self._run_raw('stat ' + remote_fn, ignore_errors=True)
return 'No such file' not in stdout
def write(self, remote_fn, contents):
tmp_fn = self.local_scratch + '/' + str(util.now_micros())
open(tmp_fn, 'w').write(contents)
self.upload(tmp_fn, remote_fn)
def read(self, remote_fn):
tmp_fn = self.local_scratch + '/' + str(util.now_micros())
self.download(remote_fn, tmp_fn)
return open(tmp_fn).read()
def isdir(self, remote_fn):
stdout, _stderr = self._run_raw('ls -ld ' + remote_fn)
return stdout.startswith('d')
def switch_window(self, window_id: int):
"""
Switches currently active tmux window for given task. 0 is the default window
Args:
window_id: integer id of tmux window to use
"""
# windows are numbered sequentially 0, 1, 2, ...
# create any missing windows and make them point to the same directory
if window_id not in self.tmux_available_window_ids:
for i in range(max(self.tmux_available_window_ids) + 1, window_id + 1):
self._run_raw(f'tmux new-window -t {self.tmux_session} -d')
self.tmux_available_window_ids.append(i)
self.tmux_window_id = window_id
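# Illustrative aside (not part of the original module): switch_window() only
# records the target window id and lazily creates any missing tmux windows; the
# underlying commands look roughly like the sketch below (session name and
# command are examples, not values from this file).
#
#   tmux new-window -t mysession -d            # create windows 1, 2, ... on demand
#   tmux send-keys -t mysession:2 'ls' Enter   # later commands target session:window
#
# Numbering windows 0..N keeps tmux_available_window_ids a simple list of ints.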
@property
def logdir(self):
"""Returns logging directory, creating one if necessary. See "Logdir" section
of design doc on naming convention"""
run_name = ncluster_globals.get_run_for_task(self)
logdir = ncluster_globals.get_logdir(run_name)
if logdir:
return logdir
# create logdir. Only single task in a group creates the logdir
if ncluster_globals.is_chief(self, run_name):
chief = self
else:
chief = ncluster_globals.get_chief(run_name)
chief.setup_logdir()
return ncluster_globals.get_logdir(run_name)
# release lock
def setup_logdir(self):
# todo: locking on logdir creation
"""Create logdir for task/job/run
"""
run_name = ncluster_globals.get_run_for_task(self)
self.log("Creating logdir for run " + run_name)
logdir_root = ncluster_globals.LOGDIR_ROOT
assert logdir_root
self.run(f'mkdir -p {logdir_root}')
find_command = f'find {logdir_root} -maxdepth 1 -type d'
stdout, stderr = self.run_with_output(find_command)
logdir = f"{logdir_root}/{run_name}"
counter = 0
while logdir in stdout:
counter += 1
new_logdir = f'{logdir_root}/{run_name}.{counter:02d}'
self.log(f'Warning, logdir {logdir} exists, deduping to {new_logdir}')
logdir = new_logdir
self.run(f'mkdir -p {logdir}')
ncluster_globals.set_logdir(run_name, logdir)
return logdir
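# Illustrative aside (not part of the original module): the dedup loop above
# appends a two-digit counter until the directory name no longer appears in the
# `find` output. For example, with a made-up LOGDIR_ROOT of /ncluster/runs and an
# existing run "train":
#
#   /ncluster/runs/train      -> already exists, counter becomes 1
#   /ncluster/runs/train.01   -> free, used as the new logdir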
# legacy methods
def file_exists(self, remote_fn):
return self.exists(remote_fn)
def file_write(self, *args, **kwargs):
return self.write(*args, **kwargs)
def file_read(self, remote_fn):
return self.read(remote_fn)
class Job(backend.Job):
pass
class Run(backend.Run):
"""Run is a collection of jobs that share state. IE, training run will contain gradient worker job, parameter
server job, and TensorBoard visualizer job. These jobs will use the same shared directory to store checkpoints and
event files.
:ivar aws_placement_group_name: somedoc
"""
placement_group: str # unique identifier to use as placement_group group name
jobs: List[Job]
def __init__(self, name='', **kwargs):
"""Creates a run. If install_script is specified, it's used as default
install_script for all jobs (can be overridden by Job constructor)"""
assert name, "Must specify name for current run"
jobs = []
self.name = name
self.jobs = jobs
self.kwargs = kwargs
util.log(f"Choosing placement_group for run {name}")
self.placement_group = name + '-' + util.random_id()
@property
def logdir(self):
# querying logdir has a side-effect of creation, so do it on chief task
chief_task = ncluster_globals.get_chief(self.name)
return chief_task.logdir
# TODO: currently this is synchronous, use non_blocking wrapper like in Job to parallelize methods
def run(self, *args, **kwargs):
"""Runs command on every job in the run."""
for job in self.jobs:
job.run(*args, **kwargs)
def run_with_output(self, *args, **kwargs):
"""Runs command on every first job in the run, returns stdout."""
for job in self.jobs:
job.run_with_output(*args, **kwargs)
def _run_raw(self, *args, **kwargs):
"""_run_raw on every job in the run."""
for job in self.jobs:
job._run_raw(*args, **kwargs)
def upload(self, *args, **kwargs):
"""Runs command on every job in the run."""
for job in self.jobs:
job.upload(*args, **kwargs)
def make_job(self, name='', **kwargs):
return make_job(name+'.'+self.name, run_name=self.name, **kwargs)
def make_task(
name: str = '',
run_name: str = '',
install_script: str = '',
instance_type: str = '',
image_name: str = '',
disk_size: int = 0,
preemptible=None,
logging_task: backend.Task = None,
create_resources=True,
spot=False
) -> Task:
"""
Create task on AWS.
Automatically places it in singleton Run/singleton Job objects, see Run/Job/Task hierarchy for details
https://docs.google.com/document/d/1Gg4T243cYrDUW1YDCikmqp7fzSQDU3rZxOkJr9ohhs8/edit#heading=h.j4td4oixogib
Args:
disk_size: default size of root disk, in GBs
create_resources: whether this task will handle resource creation
name: see ncluster.make_task
run_name: see ncluster.make_task
install_script: see ncluster.make_task
instance_type: instance type to use, defaults to $NCLUSTER_INSTANCE or t3.micro if unset
image_name: name of image, ie, "Deep Learning AMI (Ubuntu) Version 12.0", defaults to $NCLUSTER_IMAGE or amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2 if unset
preemptible: use cheaper preemptible/spot instances
logging_task: partially initialized Task object, use it for logging
Returns:
"""
ncluster_globals.task_launched = True
def log(*_args):
if logging_task:
logging_task.log(*_args)
else:
util.log(*_args)
# if name not specified, use name which is the same across script invocations for given image/instance-type
name = ncluster_globals.auto_assign_task_name_if_needed(name, instance_type,
image_name)
if not instance_type:
instance_type = os.environ.get('NCLUSTER_INSTANCE', 't3.micro')
log("Using instance " + instance_type)
_set_aws_environment()
if create_resources:
_maybe_create_resources(logging_task=logging_task)
else:
pass
run: Run = ncluster_globals.get_run_object(run_name)
placement_group = ''
if u.instance_supports_placement_groups(instance_type) and run:
placement_group = run.placement_group
log(f"Launching into placement_group group {placement_group}")
u.maybe_create_placement_group(run.placement_group)
if not image_name:
image_name = os.environ.get('NCLUSTER_IMAGE',
GENERIC_SMALL_IMAGE)
log("Using image " + image_name)
if preemptible is None:
preemptible = os.environ.get('NCLUSTER_PREEMPTIBLE', False)
preemptible = bool(preemptible)
if preemptible:
log("Using preemptible instances")
image = u.lookup_image(image_name)
keypair = u.get_keypair()
security_group = u.get_security_group()
ec2 = u.get_ec2_resource()
instance = u.lookup_instance(name, instance_type,
image_name)
_maybe_start_instance(instance)
_maybe_wait_for_initializing_instance(instance)
# create the instance if not present
if instance:
log(f"Reusing {instance}")
else:
log(f"Allocating {instance_type} for task {name}")
args = {'ImageId': image.id,
'InstanceType': instance_type,
'MinCount': 1,
'MaxCount': 1,
'SecurityGroupIds': [security_group.id],
'KeyName': keypair.name}
args['TagSpecifications'] = [{
'ResourceType': 'instance',
'Tags': [{
'Key': 'Name',
'Value': name
}]
}]
# subnet = u.get_subnet()
# args['NetworkInterfaces'] = [{'SubnetId': subnet.id,
# 'DeviceIndex': 0,
# 'AssociatePublicIpAddress': True,
# 'Groups': [security_group.id]}]
# placement_specs = {'AvailabilityZone': u.get_zone()}
placement_specs = {}
if placement_group:
placement_specs['GroupName'] = placement_group
args['Placement'] = placement_specs
args['Monitoring'] = {'Enabled': True}
if disk_size:
assert disk_size > 0
ebs = {
'VolumeSize': disk_size,
'VolumeType': 'gp2',
}
args['BlockDeviceMappings'] = [{
'DeviceName': '/dev/sda1',
'Ebs': ebs
}]
# Use high throughput disk (0.065/iops-month = about $1/hour)
if 'NCLUSTER_AWS_FAST_ROOTDISK' in os.environ:
assert not disk_size, f"Specified both disk_size {disk_size} and $NCLUSTER_AWS_FAST_ROOTDISK, they are incompatible as $NCLUSTER_AWS_FAST_ROOTDISK hardwired disk size"
ebs = {
'VolumeSize': 500,
'VolumeType': 'io1',
'Iops': 11500
}
args['BlockDeviceMappings'] = [{
'DeviceName': '/dev/sda1',
'Ebs': ebs
}]
instances = []
try:
if spot:
instances = u.create_spot_instances(args)
else:
instances = ec2.create_instances(**args)
except Exception as e:
log(f"Instance creation for {name} failed with ({e})")
log(
"You can change availability zone using export NCLUSTER_ZONE=...")
log("Terminating")
os.kill(os.getpid(),
signal.SIGINT) # sys.exit() doesn't work inside thread
assert instances, f"ec2.create_instances returned {instances}"
log(f"Allocated {len(instances)} instances")
instance = instances[0]
task = Task(name, instance=instance,
install_script=install_script,
image_name=image_name,
instance_type=instance_type)
ncluster_globals.register_task(task, run_name)
return task
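# Illustrative aside (not part of the original module): a hedged usage sketch of
# make_task(). Instance type and image fall back to $NCLUSTER_INSTANCE and
# $NCLUSTER_IMAGE when omitted; the values below are examples only.
#
#   task = make_task(name='worker0',
#                    instance_type='p3.2xlarge',
#                    image_name='Deep Learning AMI (Ubuntu) Version 12.0',
#                    install_script='pip install -r requirements.txt')
#   task.run('python train.py')
#
# Reuse is keyed on the task name: a stopped instance with the same name is
# started and reused instead of allocating a new one.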
def make_job(
name: str = '',
run_name: str = '',
num_tasks: int = 1,
install_script: str = '',
instance_type: str = '',
image_name: str = '',
create_resources=True,
**kwargs) -> Job:
"""
Args:
create_resources: if True, will create resources if necessary
name: see backend.make_task
run_name: see backend.make_task
num_tasks: number of tasks to launch
install_script: see make_task
instance_type: see make_task
image_name: see make_task
Returns:
"""
assert num_tasks > 0, f"Can't create job with {num_tasks} tasks"
assert name.count(
'.') <= 1, "Job name has too many .'s (see ncluster design: Run/Job/Task hierarchy for convention)"
# dummy tasks for logging
tasks = [backend.Task(f"{i}.{name}") for i in range(num_tasks)]
_set_aws_environment(tasks[0])
if create_resources:
_maybe_create_resources(tasks[0])
name = ncluster_globals.auto_assign_job_name_if_needed(name)
run_name = ncluster_globals.auto_assign_run_name_if_needed(run_name)
_run = ncluster_globals.create_run_if_needed(run_name, make_run)
job = Job(name=name, tasks=tasks, run_name=run_name, **kwargs)
exceptions = []
# make tasks in parallel
def make_task_fn(i: int):
try:
tasks[i] = make_task(f"{i}.{name}", run_name=run_name,
install_script=install_script,
instance_type=instance_type, image_name=image_name,
logging_task=tasks[i],
create_resources=False,
# handle resources in job already
**kwargs)
except Exception as e:
exceptions.append(e)
util.log("Creating threads")
threads = [threading.Thread(name=f'make_task_{i}',
target=make_task_fn, args=[i])
for i in range(num_tasks)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print("Exception are ", exceptions)
if exceptions:
raise exceptions[0]
job.tasks = tasks
# double check that all instances are in the same placement group
# this can happen if some instances from previous smaller run are getting reused
placement_dict = {task.instance.placement_group: task.name for task in
job.tasks}
# TODO: make placement group name derived from run, to make it deterministic
# on individual instance restarts
if len(placement_dict) > 1:
util.log("Job tasks are spread over multiple placement_group groups")
pprint.pprint(placement_dict)
raise RuntimeError(
f"Got instance spread over multiple placement_group groups: {placement_dict}. Must terminate all instances in run {run_name} and try again.")
return job
def make_run(name) -> Run:
run = Run(name)
ncluster_globals.register_run(name, run)
return run
# TODO: this method and a few others are backend specific, document in API doc
def _maybe_start_instance(instance):
"""Starts instance if it's stopped, no-op otherwise."""
if not instance:
return
if instance.state['Name'] == 'stopped':
instance.start()
while True:
print(f"Waiting for {instance} to start.")
instance.reload()
if instance.state['Name'] == 'running':
break
time.sleep(10)
def _maybe_wait_for_initializing_instance(instance):
"""Starts instance if it's stopped, no-op otherwise."""
if not instance:
return
if instance.state['Name'] == 'initializing':
while True:
print(f"Waiting for {instance} to leave state 'initializing'.")
instance.reload()
if instance.state['Name'] == 'running':
break
time.sleep(10)
def _maybe_create_resources(logging_task: Task = None):
"""Use heuristics to decide to possibly create resources"""
def log(*args):
if logging_task:
logging_task.log(*args)
else:
util.log(*args)
def should_create_resources():
"""Check if gateway, keypair, vpc exist."""
prefix = u.get_prefix()
if u.get_keypair_name() not in u.get_keypair_dict():
log(f"Missing {u.get_keypair_name()} keypair, creating resources")
return True
vpcs = u.get_vpc_dict()
if prefix not in vpcs:
log(f"Missing {prefix} vpc, creating resources")
return True
vpc = vpcs[prefix]
gateways = u.get_gateway_dict(vpc)
if prefix not in gateways:
log(f"Missing {prefix} gateway, creating resources")
return True
return False
try:
# this locking is approximate, still possible for threads to slip through
if os.path.exists(AWS_LOCK_FN):
pid, ts, lock_taskname = open(AWS_LOCK_FN).read().split('-')
ts = int(ts)
log(f"waiting for aws resource creation, another resource initiation was "
f"initiated {int(time.time()-ts)} seconds ago by "
f"{lock_taskname}, delete lock file "
f"{AWS_LOCK_FN} if this is an error")
while True:
if os.path.exists(AWS_LOCK_FN):
log(f"waiting for lock file {AWS_LOCK_FN} to get deleted "
f"initiated {int(time.time()-ts)} seconds ago by ")
time.sleep(2)
continue
else:
break
return
with open(AWS_LOCK_FN, 'w') as f:
f.write(
f'{os.getpid()}-{int(time.time())}-{logging_task.name if logging_task else ""}')
if not should_create_resources():
util.log("Resources already created, no-op")
os.remove(AWS_LOCK_FN)
return
create_lib.create_resources()
finally:
if os.path.exists(AWS_LOCK_FN):
os.remove(AWS_LOCK_FN)
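# Illustrative aside (not part of the original module): the lock above is a
# "check then write" on AWS_LOCK_FN, so two threads can still race between the
# exists() check and the open(). A tighter variant (sketch only, hypothetical
# path) would create the lock file atomically:
#
#   import os
#   def try_acquire(lock_fn='/tmp/ncluster_aws.lock'):
#       try:
#           fd = os.open(lock_fn, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
#       except FileExistsError:
#           return False                 # someone else holds the lock
#       os.write(fd, str(os.getpid()).encode())
#       os.close(fd)
#       return True
#
# The original keeps the simpler scheme since, as the comment notes, the locking
# only needs to be approximate.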
def _set_aws_environment(task: Task = None):
"""Sets up AWS environment from NCLUSTER environment variables"""
current_zone = os.environ.get('NCLUSTER_ZONE', '')
current_region = os.environ.get('AWS_DEFAULT_REGION', '')
def log(*args):
if task:
task.log(*args)
else:
util.log(*args)
if current_region and current_zone:
assert current_zone.startswith(
current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
f'in current region "{current_region} ($AWS_DEFAULT_REGION)'
assert u.get_session().region_name == current_region # setting from ~/.aws
# zone is set, set region from zone
if current_zone and not current_region:
current_region = current_zone[:-1]
os.environ['AWS_DEFAULT_REGION'] = current_region
# neither zone nor region is set, use default setting for region
# if default is not set, use NCLUSTER_DEFAULT_REGION
if not current_region:
current_region = u.get_session().region_name
if not current_region:
log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
current_region = NCLUSTER_DEFAULT_REGION
os.environ['AWS_DEFAULT_REGION'] = current_region
# zone not set, use first zone of the region
# if not current_zone:
# current_zone = current_region + 'a'
# os.environ['NCLUSTER_ZONE'] = current_zone
log(f"Using account {u.get_account_number()}, region {current_region}, "
f"zone {current_zone}")
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR CODE HERE
with tf.variable_scope(scope):
x = input_placeholder
for i in range(n_layers):
x = tf.layers.dense(
inputs=x,
units=size,
activation=activation,
)
output_placeholder = tf.layers.dense(
inputs=x,
units=output_size,
activation=output_activation,
)
return output_placeholder
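# Illustrative aside (not part of the original assignment code): a hedged usage
# sketch of build_mlp() in TF1 graph mode; shapes and scope names below are
# examples only.
#
#   ob_ph = tf.placeholder(tf.float32, shape=[None, 4], name='ob')
#   logits = build_mlp(ob_ph, output_size=2, scope='policy',
#                      n_layers=2, size=64)        # -> tensor of shape (None, 2)
#
# In the Agent below it is called once for the policy ('nn') and, optionally,
# once more for the value baseline ('nn_baseline').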
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(
sy_ob_no,
output_size=self.ac_dim,
scope="nn",
n_layers=self.n_layers,
size=self.size,
activation=tf.nn.tanh,
)
return sy_logits_na
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(
sy_ob_no,
output_size=self.ac_dim,
scope="nn",
n_layers=self.n_layers,
size=self.size,
activation=tf.nn.tanh,
)
sy_logstd = tf.get_variable(name="logstd", shape=[self.ac_dim])
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
sy_sampled_ac = tf.squeeze(
tf.multinomial(sy_logits_na, num_samples=1),
axis=[1])
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_CODE_HERE
z = tf.random_normal(shape=tf.shape(sy_mean))
sy_sampled_ac = sy_mean + tf.multiply(tf.exp(sy_logstd), z)
return sy_sampled_ac
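# Illustrative aside (not part of the original assignment code): the
# reparameterization trick used above, shown numerically with NumPy (values are
# made up).
#
#   import numpy as np
#   mean, logstd = np.array([0.5, -1.0]), np.array([0.0, -0.5])
#   z = np.random.randn(2)                  # z ~ N(0, I)
#   action = mean + np.exp(logstd) * z      # sample from N(mean, diag(std^2))
#
# Keeping the randomness in z lets gradients flow through mean and logstd.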
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
sy_logprob_n = - tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=sy_ac_na, logits=sy_logits_na)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_CODE_HERE
distribution = tfp.distributions.MultivariateNormalDiag(
loc=sy_mean, scale_diag=tf.exp(sy_logstd))
sy_logprob_n = distribution.log_prob(sy_ac_na)
return sy_logprob_n
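# Illustrative aside (not part of the original assignment code): for the
# continuous case, the log probability returned by MultivariateNormalDiag for an
# action a with mean mu and std sigma = exp(logstd) is
#
#   log p(a) = -0.5 * sum_i( ((a_i - mu_i) / sigma_i)**2
#                            + 2 * logstd_i + log(2 * pi) )
#
# For the discrete case, the negated sparse softmax cross-entropy above is
# exactly log pi(a|s) for the taken action.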
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
# YOUR CODE HERE
loss = - tf.reduce_mean(tf.multiply(self.sy_logprob_n, self.sy_adv_n))
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
self.sy_target_n = tf.placeholder(tf.float32, shape=[None])
baseline_loss = tf.losses.mean_squared_error(
labels=self.sy_target_n, predictions=self.baseline_prediction)
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
# YOUR CODE HERE
ac = self.sess.run(self.sy_sampled_ac,
feed_dict={self.sy_ob_no: ob[np.newaxis, :]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
q = []
if self.reward_to_go:
for re in re_n:
res = []
future_reward = 0
for r in reversed(re):
future_reward = future_reward * self.gamma + r
res.append(future_reward)
q.append(np.array(res[::-1]))
else:
for re in re_n:
n = len(re)
discount = np.logspace(0, n-1, n, base=self.gamma)
q.append(np.repeat(np.sum(re * discount), n))
q_n = np.concatenate(q)
return q_n
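# Illustrative aside (not part of the original assignment code): a tiny NumPy
# check of the two return estimators above on a made-up reward sequence.
#
#   rewards, gamma = [1.0, 0.0, 2.0], 0.9
#   # reward-to-go:  Q_t = sum_{t'>=t} gamma^(t'-t) * r_t'
#   #   -> [1 + 0.9*0 + 0.81*2, 0 + 0.9*2, 2] = [2.62, 1.8, 2.0]
#   # full-trajectory return, repeated at every step:
#   #   -> [2.62, 2.62, 2.62]
#
# The reversed accumulation loop above computes the reward-to-go case in O(T).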
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.)
# YOUR CODE HERE
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no})
b_n = np.mean(q_n) + (b_n - np.mean(b_n)) / np.std(b_n) * np.std(q_n)
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
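# Illustrative aside (not part of the original assignment code): the rescaling in
# hint #bl1 maps the raw baseline predictions onto the mean/std of the current
# batch of Q-values. With made-up numbers:
#
#   b_raw has mean 0.0, std 1.0   (the network is trained on normalized targets)
#   q_n   has mean 5.0, std 2.0
#   b_n   = 5.0 + (b_raw - 0.0) / 1.0 * 2.0
#
# so the baseline is on the same scale as q_n before being subtracted.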
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n = (adv_n - np.mean(adv_n)) / np.std(adv_n)
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# YOUR_CODE_HERE
target_n = (q_n - np.mean(q_n)) / np.std(q_n)
self.sess.run(self.baseline_update_op, feed_dict={
self.sy_ob_no: ob_no,
self.sy_target_n: target_n,
})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
self.sess.run(self.update_op, feed_dict={
self.sy_ob_no: ob_no,
self.sy_ac_na: ac_na,
self.sy_adv_n: adv_n,
})
# _, loss_value, nll = self.sess.run(
# [self.update_op, self.loss, self.sy_nll],
# feed_dict={
# self.sy_ob_no: ob_no,
# self.sy_ac_na: ac_na,
# self.sy_adv_n: adv_n,
# },
# )
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you uncomment the line below, the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
state.py
|
# -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
from __future__ import print_function
from __future__ import unicode_literals
import sys
import threading
import traceback
from dftimewolf.lib import errors
from dftimewolf.lib import utils
from dftimewolf.lib.modules import manager as modules_manager
class DFTimewolfState(object):
"""The main State class.
Attributes:
config (dftimewolf.config.Config): Class to be used throughout execution.
errors (list[tuple[str, bool]]): errors generated by a module. These
should be cleaned up after each module run using the CleanUp() method.
global_errors (list[tuple[str, bool]]): the CleanUp() method moves non
critical errors to this attribute for later reporting.
input (list[str]): data that the current module will use as input.
output (list[str]): data that the current module generates.
recipe: (dict[str, str]): recipe declaring modules to load.
store (dict[str, object]): arbitrary data for modules.
"""
def __init__(self, config):
"""Initializes a state."""
super(DFTimewolfState, self).__init__()
self._command_line_options = None
self._module_pool = {}
self._store_lock = threading.Lock()
self._threading_event_per_module = {}
self.config = config
self.errors = []
self.global_errors = []
self.input = []
self.output = []
self.recipe = None
self.store = {}
def _InvokeModulesInThreads(self, callback):
"""Invokes the callback function on all the modules in separate threads.
Args:
callback (function): callback function to invoke on all the modules.
"""
threads = []
for module_definition in self.recipe['modules']:
thread_args = (module_definition, )
thread = threading.Thread(target=callback, args=thread_args)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
self.CheckErrors(is_global=True)
def LoadRecipe(self, recipe):
"""Populates the internal module pool with modules declared in a recipe.
Args:
recipe (dict[str, str]): recipe declaring modules to load.
Raises:
RecipeParseError: if a module in the recipe does not exist.
"""
self.recipe = recipe
for module_definition in recipe['modules']:
# Combine CLI args with args from the recipe description
module_name = module_definition['name']
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
if not module_class:
raise errors.RecipeParseError(
'Recipe uses unknown module: {0:s}'.format(module_name))
self._module_pool[module_name] = module_class(self)
def StoreContainer(self, container):
"""Thread-safe method to store data in the state's store.
Args:
container (AttributeContainer): data to store.
"""
with self._store_lock:
self.store.setdefault(container.CONTAINER_TYPE, []).append(container)
def GetContainers(self, container_class):
"""Thread-safe method to retrieve data from the state's store.
Args:
container_class (type): AttributeContainer class used to filter data.
Returns:
list[AttributeContainer]: attribute container objects provided in
the store that correspond to the container type.
"""
with self._store_lock:
return self.store.get(container_class.CONTAINER_TYPE, [])
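# Illustrative aside (not part of the original module): a hedged sketch of how a
# module would use the thread-safe store; the container class below is
# hypothetical, standing in for any class with a CONTAINER_TYPE identifier.
#
#   class FilePath(object):
#       CONTAINER_TYPE = 'file_path'
#       def __init__(self, path):
#           self.path = path
#
#   state.StoreContainer(FilePath('/tmp/evidence.img'))   # producer module
#   paths = state.GetContainers(FilePath)                 # consumer module
#
# Both calls take the same lock, so producer and consumer modules can run in
# parallel threads without corrupting the store dict.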
def _SetupModuleThread(self, module_definition):
"""Calls the module's SetUp() function and sets a threading event for it.
Callback for _InvokeModulesInThreads.
Args:
module_definition (dict[str, str]): recipe module definition.
"""
module_name = module_definition['name']
new_args = utils.ImportArgsFromDict(
module_definition['args'], self._command_line_options, self.config)
module = self._module_pool[module_name]
try:
module.SetUp(**new_args)
except Exception as exception: # pylint: disable=broad-except
self.AddError(
'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
exception, traceback.format_exc()),
critical=True)
self._threading_event_per_module[module_name] = threading.Event()
self.CleanUp()
def SetupModules(self, command_line_options):
"""Performs setup tasks for each module in the module pool.
Threads declared modules' SetUp() functions. Takes CLI arguments into
account when replacing recipe parameters for each module.
Args:
command_line_options (argparse.Namespace): Command line options that
will be used to replace the parameters declared in the recipe.
"""
# Note that vars() copies the values of argparse.Namespace to a dict.
self._command_line_options = vars(command_line_options)
self._InvokeModulesInThreads(self._SetupModuleThread)
def _RunModuleThread(self, module_definition):
"""Runs the module's Process() function.
Callback for _InvokeModulesInThreads.
Waits for any blockers to have finished before running Process(), then
sets an Event flag declaring the module has completed.
Args:
module_definition (str): module definition.
"""
module_name = module_definition['name']
for dependency in module_definition['wants']:
self._threading_event_per_module[dependency].wait()
module = self._module_pool[module_name]
try:
module.Process()
except errors.DFTimewolfError as exception:
self.AddError(exception.message, critical=True)
except Exception as exception: # pylint: disable=broad-except
self.AddError(
'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
exception, traceback.format_exc()),
critical=True)
print('Module {0:s} completed'.format(module_name))
self._threading_event_per_module[module_name].set()
self.CleanUp()
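# Illustrative aside (not part of the original module): the dependency handling
# above is a plain threading.Event barrier; the sketch below reproduces it in
# isolation (module names are made up).
#
#   import threading
#   events = {'collector': threading.Event()}
#   def run_processor():
#       events['collector'].wait()     # blocks until the collector signals completion
#       print('processor running')
#   threading.Thread(target=run_processor).start()
#   events['collector'].set()          # collector done -> processor unblocks
#
# Events are created during SetupModules() and only set once Process() finishes,
# so modules listed in 'wants' always run after their dependencies.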
def RunModules(self):
"""Performs the actual processing for each module in the module pool."""
self._InvokeModulesInThreads(self._RunModuleThread)
def AddError(self, error, critical=False):
"""Adds an error to the state.
Args:
error (str): text that will be added to the error list.
critical (Optional[bool]): True if dfTimewolf cannot recover from
the error and should abort.
"""
self.errors.append((error, critical))
def CleanUp(self):
"""Cleans up after running a module.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage.
"""
# Move any existing errors to global errors
self.global_errors.extend(self.errors)
self.errors = []
# Make the previous module's output available to the next module
self.input = self.output
self.output = []
def CheckErrors(self, is_global=False):
"""Checks for errors and exits if any of them are critical.
Args:
is_global (Optional[bool]): True if the global_errors attribute should
be checked. False if the error attribute should be checked.
"""
error_objects = self.global_errors if is_global else self.errors
if error_objects:
print('dfTimewolf encountered one or more errors:')
for error, critical in error_objects:
print('{0:s} {1!s}'.format('CRITICAL: ' if critical else '', error))
if critical:
print('Critical error found. Aborting.')
sys.exit(-1)
|
runner.py
|
#!/usr/bin/python
import base64
import donlib
import io
import cortexlib
import os
import sys
import threading
import time
from halocelery import tasks
from halocelery.apputils import Utility as util
from collections import deque
def main():
global slack_inbound
global slack_outbound
global health_string
global health_last_event_timestamp
global async_jobs
async_jobs = deque([])
health_last_event_timestamp = ""
health_string = ""
slack_inbound = deque([])
slack_outbound = deque([])
config = donlib.ConfigHelper()
"""First we make sure that all configs are sound..."""
check_configs(config)
"""Next, we start the Slack ingestion thread..."""
slack_consumer = threading.Thread(target=slack_in_manager, args=[config])
slack_consumer.daemon = True
slack_consumer.start()
"""Now, we start the Slack emitter..."""
slack_emitter = threading.Thread(target=slack_out_manager, args=[config])
slack_emitter.daemon = True
slack_emitter.start()
"""Next, our asynchronous job manager gets crunk"""
async_mgr = threading.Thread(target=async_manager, args=[config])
async_mgr.daemon = True
async_mgr.start()
"""Finally, we start up the Daemon Speaker"""
halo_enricher = threading.Thread(target=daemon_speaker, args=[config])
halo_enricher.daemon = True
halo_enricher.start()
msg = "Starting Don-Bot v%s\nName is set to %s" % (donlib.__version__,
config.slack_username)
util.log_stdout(msg)
msg = "Don-Bot sends general notifications to #%s" % config.slack_channel
util.log_stdout(msg)
if config.monitor_events == "yes":
util.log_stdout("Starting Halo event monitor")
halo_collector = threading.Thread(target=event_connector,
args=[config])
halo_collector.daemon = True
halo_collector.start()
while True:
s_consumer = (" Slack consumer alive: %s" %
str(slack_consumer.is_alive()))
s_emitter = (" Slack emitter alive: %s" %
str(slack_emitter.is_alive()))
h_enricher = (" Halo enricher alive: %s" %
str(halo_enricher.is_alive()))
a_manager = (" Async job manager alive: %s" %
str(async_mgr.is_alive()))
if config.monitor_events == "yes":
h_events = " Halo event monitor alive: %s\n Last event: %s" % (
halo_collector.is_alive(), health_last_event_timestamp)
else:
h_events = ""
health_string = "\n".join([s_consumer, s_emitter, h_enricher,
a_manager, h_events])
die_if_unhealthy(config.slack_channel)
time.sleep(30)
def die_if_unhealthy(slack_channel):
if "False" in health_string:
msg = health_string
msg += "\n\nDetected trouble in bot (see above). Bot app will restart."
channel = slack_channel
sad_note = (channel, msg)
slack_outbound.append(sad_note)
time.sleep(5)
sys.exit(2)
else:
pass
def event_connector(config):
global health_last_event_timestamp
halo = donlib.Halo(config, str(health_string), tasks)
events = donlib.HaloEvents(config)
quarantine = cortexlib.Quarantine(config)
ipblock = cortexlib.IpBlockCheck(config)
quarantine_check = False
ip_block_check = False
# We add a short delay in case of time drift between container and API
time.sleep(10)
while True:
for event in events:
quarantine_check = quarantine.should_quarantine(event)
ip_block_check = ipblock.should_block_ip(event)
health_last_event_timestamp = event["created_at"]
if not donlib.Utility.is_suppressed_event_type(config, event):
if donlib.Utility.event_is_critical(event):
util.log_stdout("EVENT_CONNECTOR: Critical event detected!") # NOQA
event_fmt = donlib.Formatter.format_item(event, "event")
slack_outbound.append((config.slack_channel, event_fmt))
if quarantine_check is not False:
async_jobs.append((config.slack_channel,
halo.quarantine_server(event)))
if ip_block_check is not False:
target_ip = ipblock.extract_ip_from_event(event)
target_zone_name = ipblock.ip_zone_name
async_jobs.append((config.slack_channel,
halo.add_ip_to_blocklist(target_ip,
target_zone_name)))
def daemon_speaker(config):
while True:
celery_tasks = tasks
halo = donlib.Halo(config, str(health_string), celery_tasks)
try:
message = slack_inbound.popleft()
channel = message["channel"]
halo_query, target = donlib.Lexicals.parse(message)
halo_results = halo.interrogate(halo_query, target)
util.log_stdout("DAEMON_SPEAKER: Results object type:%s" %
type(halo_results))
if isinstance(halo_results, str):
slack_outbound.append((channel, halo_results))
else:
util.log_stdout("DAEMON_SPEAKER: queueing up async job")
async_jobs.append((channel, halo_results))
except IndexError:
time.sleep(1)
return
def async_manager(config):
while True:
try:
job = async_jobs.popleft()
if job[1].ready():
if job[1].successful():
outbound_construct = (job[0], job[1].result)
slack_outbound.append(outbound_construct)
job[1].forget()
elif job[1].failed():
outbound_construct = (job[0], "REQUEST FAILED")
slack_outbound.append(outbound_construct)
job[1].forget()
else: # If not successful and not failed, throw it back.
async_jobs.append(job)
else:
async_jobs.append(job)
time.sleep(1)
except IndexError:
time.sleep(1)
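# Illustrative aside (not part of the original module): each queued job above is
# a (channel, AsyncResult) pair, polled through the standard Celery result API:
#
#   result = some_task.delay(args)          # hypothetical Celery task call
#   result.ready()                          # True once the worker has finished
#   result.successful() / result.failed()   # outcome checks used above
#   result.forget()                         # drop the stored result from the backend
#
# Unfinished jobs are re-appended to the deque, giving a simple round-robin poll
# with a short sleep between passes.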
def slack_in_manager(config):
slack = donlib.Slack(config)
for message in slack:
util.log_stdout("Message in slack consumer")
slack_inbound.append(message)
def slack_out_manager(config):
slack = donlib.Slack(config)
while True:
try:
message = slack_outbound.popleft()
try:
# Attempt to decode from Base64.
if "\n" in message[1]:
raise TypeError("Detected plaintext response...")
dec_msg = base64.decodestring(message[1])
util.log_stdout("Detected Base64-encoded file...")
slack.send_file(message[0], io.BytesIO(dec_msg).read(),
"Daemonic File")
except TypeError as e:
util.log_stdout(e)
slack.send_report(message[0], message[1],
"Daemonic Report")
except IndexError:
time.sleep(1)
def check_configs(config):
halo = donlib.Halo(config, "", "")
if halo.credentials_work() is False:
util.log_stdout("Halo credentials are bad! Exiting!")
sys.exit(1)
# If NOSLACK env var is set, don't go any further!
if os.getenv("NOSLACK"):
noslack_hold()
if config.sane() is False:
util.log_stdout("Configuration is bad! Exiting!")
sys.exit(1)
slack = donlib.Slack(config)
if slack.credentials_work() is False:
util.log_stdout("Slack credentials are bad! Exiting!")
sys.exit(1)
def noslack_hold():
msg = ("Slack integration is disabled. "
"Interact with Halo using:"
" 'docker exec -it cortex-bot python /app/interrogate.py'")
while True:
util.log_stdout(msg)
time.sleep(3600)
if __name__ == "__main__":
main()
|
executorselenium.py
|
import json
import os
import socket
import threading
import time
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
RemoteConnection = None
Command = None
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
global Command
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
from selenium.webdriver.remote.command import Command
class SeleniumBaseProtocolPart(BaseProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def execute_script(self, script, async=False):
method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
return method(script)
def set_timeout(self, timeout):
self.webdriver.set_script_timeout(timeout * 1000)
@property
def current_window(self):
return self.webdriver.current_window_handle
def set_window(self, handle):
self.webdriver.switch_to_window(handle)
def wait(self):
while True:
try:
self.webdriver.execute_async_script("")
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
self.runner_handle = None
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
def load_runner(self, url_protocol):
if self.runner_handle:
self.webdriver.switch_to_window(self.runner_handle)
url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.runner_handle = self.webdriver.current_window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
handles = [item for item in self.webdriver.window_handles if item != self.runner_handle]
for handle in handles:
try:
self.webdriver.switch_to_window(handle)
self.webdriver.close()
except exceptions.NoSuchWindowException:
pass
self.webdriver.switch_to_window(self.runner_handle)
return self.runner_handle
def get_test_window(self, window_id, parent, timeout=5):
"""Find the test window amongst all the open windows.
This is assumed to be either the named window or the one after the parent in the list of
window handles
:param window_id: The DOM name of the Window
:param parent: The handle of the runner window
:param timeout: The time in seconds to wait for the window to appear. This is because in
some implementations there's a race between calling window.open and the
window being added to the list of WebDriver accessible windows."""
test_window = None
end_time = time.time() + timeout
while time.time() < end_time:
try:
# Try using the JSON serialization of the WindowProxy object,
# it's in Level 1 but nothing supports it yet
win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
if test_window is None:
after = self.webdriver.window_handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
if test_window is not None:
assert test_window != parent
return test_window
time.sleep(0.1)
raise Exception("unable to find test window")
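# Illustrative sketch (added, not part of wptrunner): the window-picking fallback used by
# get_test_window above, factored out as a pure function over a list of handle strings.
# It is never called by this module.
def _pick_test_window_sketch(handles, parent):
    # With exactly two handles, the non-parent one must be the test window; otherwise,
    # if the parent handle comes first and more handles exist, assume the test window
    # directly follows it.
    if len(handles) == 2:
        return next(iter(set(handles) - set([parent])))
    if handles and handles[0] == parent and len(handles) > 2:
        return handles[1]
    return None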
class SeleniumSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def elements_by_selector(self, selector):
return self.webdriver.find_elements_by_css_selector(selector)
class SeleniumClickProtocolPart(ClickProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def element(self, element):
return element.click()
class SeleniumSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_keys(self, element, keys):
return element.send_keys(keys)
class SeleniumActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_actions(self, actions):
self.webdriver.execute(Command.W3C_ACTIONS, {"actions": actions})
class SeleniumTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class SeleniumProtocol(Protocol):
implements = [SeleniumBaseProtocolPart,
SeleniumTestharnessProtocolPart,
SeleniumSelectorProtocolPart,
SeleniumClickProtocolPart,
SeleniumSendKeysProtocolPart,
SeleniumTestDriverProtocolPart,
SeleniumActionSequenceProtocolPart]
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
super(SeleniumProtocol, self).__init__(executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def connect(self):
"""Connect to browser via Selenium's WebDriver implementation."""
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except Exception:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.testharness.load_runner(self.executor.last_environment["protocol"])
class SeleniumRun(object):
def __init__(self, func, protocol, url, timeout):
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.protocol.base.set_timeout((timeout + extra_timeout))
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
if flag:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self._run fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
else:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"url": strip_server(url)}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
timeout=5*self.timeout_multiplier)
self.protocol.base.set_window(test_window)
protocol.webdriver.get(url)
handler = CallbackHandler(self.logger, protocol, test_window)
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
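# Illustrative note (added): do_testharness above polls the harness by re-running
# testharness_webdriver_resume.js as an async script; CallbackHandler either reports the
# final results (done == True) or services a queued testdriver action before the loop
# re-enters the script.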
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
width_offset, height_offset = self.protocol.webdriver.execute_script(
"""return [window.outerWidth - window.innerWidth,
window.outerHeight - window.innerHeight];"""
)
self.protocol.webdriver.set_window_size(600 + width_offset, 600 + height_offset)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self._screenshot,
self.protocol,
self.test_url(test),
test.timeout).run()
def _screenshot(self, protocol, url, timeout):
webdriver = protocol.webdriver
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
        # strip off the "data:image/png;base64," prefix of the data URL
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
|
lcd20x4_i2c.py
|
#!/usr/bin/env python2.7
"""Handle messages from Node-RED and display them on the LCD"""
# Topic: Send message to 20x4 LCD
#
# file : LCD20x4-I2C.py
import time
import threading
import json
import sys
import math
import lcddriver
if len(sys.argv) == 5:
CMD = sys.argv[1].lower()
LCD_TYPE = sys.argv[2].lower()
SCROLL_SPEED = int(sys.argv[3])
lcddriver.ADDRESS = int(sys.argv[4], 16)
    lcd_error = False
    try:
        LCD = lcddriver.lcd()
    except:
        print "LCD Not Found - Check Address and Connections"
        lcd_error = True
if (not lcd_error):
STARTUPMSG = [" SYSTEM ",
" START ",
" UP ",
" "]
SHUTDOWNMSG = [" SYSTEM ",
" SHUT ",
" DOWN ",
" "]
ERRORMSG = [" ERROR ",
" DETECTED ",
" ",
" "]
CLS = [" ",
" ",
" ",
" "]
def translate(value, left_min, left_max, right_min, right_max):
"""Translate string (handles scrolling effect)"""
global UPDATE_SCREEN_THREAD_STOP
if (left_min > left_max or right_min > right_max or value < left_min or value > left_max or
not isinstance(value, (int, long, float, complex))):
if not isinstance(value, (int, long, float, complex)):
error = "Scroll Speed Value NaN"
else:
error = "Scroll Speed Value Error"
updatescreen(ERRORMSG, SCROLL_SPEED, UPDATE_SCREEN_THREAD_STOP)
print error
return False
else:
left_span = left_max - left_min
right_span = right_max - right_min
value_scaled = float(value - left_min) / float(left_span)
return right_min + (value_scaled * right_span)
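    # Illustrative examples (added): with the range used below, translate(1, 1, 10, 0.1, 2)
    # returns 0.1, translate(10, 1, 10, 0.1, 2) returns 2.0 and translate(5, 1, 10, 0.1, 2)
    # returns roughly 0.94; out-of-range or non-numeric input takes the error branch and
    # returns False.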
def updatescreen(input_msg, sleep, stop_event):
"""Send message to screen"""
sleep = translate(sleep, 1, 10, 0.1, 2) #update this range to affect scroll speed
if not sleep:
return
messages = [input_msg[0], input_msg[1], input_msg[2], input_msg[3]]
scrollinglines = []
for number in range(0, 4):
if len(messages[number]) > 20:
truncated = messages[number][:19] + "*"
scrollinglines.append(number)
else:
truncated = messages[number] + " "*(20 - len(messages[number]))
LCD.lcd_display_string(truncated, number+1)
time.sleep(0.05)
while (not stop_event.is_set() and scrollinglines):
for line in scrollinglines:
LCD.lcd_display_string(messages[line][:19] + "*", line+1)
time.sleep(sleep*1.5)
for character in range(1, len(messages[line])-18):
if stop_event.is_set():
break
if character >= len(messages[line]) - 19:
truncated = "*" + messages[line][character:character+19]
else:
truncated = "*" + messages[line][character:character+18] + "*"
LCD.lcd_display_string(truncated, line+1)
time.sleep(sleep)
if stop_event.is_set():
for reset_line in range(0, 4):
LCD.lcd_display_string(" "*20, reset_line+1)
break
else:
time.sleep(sleep*1.5)
UPDATE_SCREEN_THREAD_STOP = threading.Event()
UPDATE_SCREEN_THREAD = threading.Thread(target=updatescreen, args=(STARTUPMSG, SCROLL_SPEED, UPDATE_SCREEN_THREAD_STOP))
UPDATE_SCREEN_THREAD.start()
def pad_str(pos, input_str):
"""Pad leading spaces if pos has a value"""
global UPDATE_SCREEN_THREAD_STOP
if (isinstance(input_str, basestring) and isinstance(pos, (int, long, float, complex)) and
pos > 0 and pos <= 20):
input_str = " "*(pos-1) + input_str
return input_str
else:
if not isinstance(input_str, basestring):
error = "Message not a String"
if not isinstance(pos, (int, long, float, complex)):
error = "Message Position NaN"
elif (pos < 1 or pos >= 20):
error = "Message Position not 1-20"
updatescreen(ERRORMSG, SCROLL_SPEED, UPDATE_SCREEN_THREAD_STOP)
print error
return False
def center_str(input_str):
"""Center the string based on length"""
if isinstance(input_str, basestring):
pad = int(math.floor(((20-len(input_str))/2)))
input_str = " "*(pad) + input_str
return input_str
else:
print "Message not a String"
return False
def main():
"""main function"""
global UPDATE_SCREEN_THREAD
global UPDATE_SCREEN_THREAD_STOP
if sys.version_info >= (3, 0):
print "Sorry - currently only configured to work with python 2.x"
sys.exit(1)
if CMD == "writelcd":
if LCD_TYPE == "20x4":
while True:
try:
data = raw_input()
if data == 'close':
if UPDATE_SCREEN_THREAD.isAlive():
UPDATE_SCREEN_THREAD_STOP.set()
while UPDATE_SCREEN_THREAD.isAlive():
time.sleep(0.05)
updatescreen(SHUTDOWNMSG, SCROLL_SPEED, UPDATE_SCREEN_THREAD_STOP)
sys.exit(0)
else:
if UPDATE_SCREEN_THREAD.isAlive():
UPDATE_SCREEN_THREAD_STOP.set()
while UPDATE_SCREEN_THREAD.isAlive():
time.sleep(0.05)
json_error = False
#speederror = False
poserror = False
centererror = False
try:
data = json.loads(data)
except:
print "Input not a JSON Message"
json_error = True
if not json_error:
msg = []
for line in range(0, 4):
try:
if data['msgs'][line]['center'] is True:
if len(data['msgs'][line]['msg']) < 21:
msg.append(center_str(data['msgs'][line]['msg']))
if not msg[line]:
centererror = True
break
else:
msg.append(data['msgs'][line]['msg'])
else:
raise KeyError
except KeyError:
try:
msg.append(pad_str(data['msgs'][line]['pos'],
data['msgs'][line]['msg']))
if not msg[line]:
poserror = True
except KeyError:
print "POS or msg Value Missing"
poserror = True
break
if not poserror and not centererror:
UPDATE_SCREEN_THREAD_STOP = threading.Event()
UPDATE_SCREEN_THREAD = threading.Thread(target=updatescreen, args=(msg, SCROLL_SPEED, UPDATE_SCREEN_THREAD_STOP))
UPDATE_SCREEN_THREAD.start()
else:
updatescreen(ERRORMSG, 3, UPDATE_SCREEN_THREAD_STOP)
else:
updatescreen(ERRORMSG, 3, UPDATE_SCREEN_THREAD_STOP)
except (EOFError, KeyboardInterrupt):
if UPDATE_SCREEN_THREAD.isAlive():
UPDATE_SCREEN_THREAD_STOP.set()
while UPDATE_SCREEN_THREAD.isAlive():
time.sleep(0.05)
updatescreen(SHUTDOWNMSG, 3, UPDATE_SCREEN_THREAD_STOP)
sys.exit(0)
except SystemExit:
if UPDATE_SCREEN_THREAD.isAlive():
UPDATE_SCREEN_THREAD_STOP.set()
while UPDATE_SCREEN_THREAD.isAlive():
time.sleep(0.05)
updatescreen(SHUTDOWNMSG, 3, UPDATE_SCREEN_THREAD_STOP)
sys.exit(0)
else:
print "Bad parameters - accepts writelcd {screensize}"
if __name__ == '__main__':
main()
|
diff-filterer.py
|
#!/usr/bin/python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime, filecmp, math, multiprocessing, os, psutil, shutil, subprocess, stat, sys
from collections import OrderedDict
def usage():
print("""Usage: diff-filterer.py [--assume-no-side-effects] [--assume-input-states-are-correct] [--try-fail] [--work-path <workpath>] [--num-jobs <count>] [--debug] <passingPath> <failingPath> <shellCommand>
diff-filterer.py attempts to transform (a copy of) the contents of <passingPath> into the contents of <failingPath> subject to the constraint that when <shellCommand> is run in that directory, it returns 0
OPTIONS
--assume-no-side-effects
Assume that the given shell command does not make any (relevant) changes to the given directory, and therefore don't wipe and repopulate the directory before each invocation of the command
--assume-input-states-are-correct
Assume that <shellCommand> passes in <passingPath> and fails in <failingPath> rather than re-verifying this
--try-fail
Invert the success/fail status of <shellCommand> and swap <passingPath> and <failingPath>
That is, instead of trying to transform <passingPath> into <failingPath>, try to transform <failingPath> into <passingPath>
--work-path <filepath>
File path to use as the work directory for testing the shell command
This file path will be overwritten and modified as needed for testing purposes, and will also be the working directory of the shell command when it is run
--num-jobs <count>
The maximum number of concurrent executions of <shellCommand> to spawn at once
Specify 'auto' to have diff-filterer.py dynamically adjust the number of jobs based on system load
--debug
Enable some debug checks in diff-filterer.py
""")
sys.exit(1)
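# Example invocation (added, illustrative only; the directory paths and the gradle command
# are placeholders, not part of this script):
#   python diff-filterer.py --assume-no-side-effects --num-jobs auto \
#       path/to/passingDir path/to/failingDir "./gradlew test"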
debug = False
# Miscellaneous file utilities
class FileIo(object):
def __init__(self):
return
def ensureDirExists(self, filePath):
if not os.path.isdir(filePath):
if os.path.isfile(filePath) or os.path.islink(filePath):
os.remove(filePath)
os.makedirs(filePath)
def copyFile(self, fromPath, toPath):
self.ensureDirExists(os.path.dirname(toPath))
self.removePath(toPath)
if os.path.islink(fromPath):
linkText = os.readlink(fromPath)
os.symlink(linkText, toPath)
else:
shutil.copy2(fromPath, toPath)
def writeFile(self, path, text):
f = open(path, "w+")
f.write(text)
f.close()
def writeScript(self, path, text):
self.writeFile(path, text)
os.chmod(path, 0755)
def removePath(self, filePath):
if len(os.path.split(filePath)) < 2:
raise Exception("Will not remove path at " + filePath + "; is too close to the root of the filesystem")
if os.path.islink(filePath):
os.remove(filePath)
elif os.path.isdir(filePath):
shutil.rmtree(filePath)
elif os.path.isfile(filePath):
os.remove(filePath)
def join(self, path1, path2):
return os.path.normpath(os.path.join(path1, path2))
# tells whether <parent> either contains <child> or is <child>
def contains(self, parent, child):
if parent == child:
return True
return child.startswith(parent + "/")
# returns the common prefix of two paths. For example, commonPrefixOf2("a/b/c", "a/b/cat") returns "a/b"
def commonPrefixOf2(self, path1, path2):
prefix = path2
while True:
if self.contains(prefix, path1):
return prefix
parent = os.path.dirname(prefix)
if parent == prefix:
return None
prefix = parent
# returns the common prefix of multiple paths
def commonPrefix(self, paths):
if len(paths) < 1:
return None
result = paths[0]
for path in paths:
prev = result
result = self.commonPrefixOf2(result, path)
if result is None:
return result
return result
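# Illustrative sketch (added example, never called by this script): expected behaviour of
# the prefix helpers above, using made-up relative paths.
def _demoCommonPrefix():
    fio = FileIo()
    assert fio.commonPrefixOf2("a/b/c", "a/b/cat") == "a/b"
    assert fio.commonPrefix(["a/b/c", "a/b/d", "a/x"]) == "a"
    assert fio.commonPrefix([]) is None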
fileIo = FileIo()
# Runs a shell command
class ShellScript(object):
def __init__(self, commandText, cwd):
self.commandText = commandText
self.cwd = cwd
def process(self):
cwd = self.cwd
print("Running '" + self.commandText + "' in " + cwd)
try:
subprocess.check_call(["bash", "-c", "cd " + cwd + " && " + self.commandText])
return 0
except subprocess.CalledProcessError as e:
return e.returncode
# Base class that can hold the state of a file
class FileContent(object):
def apply(self, filePath):
pass
def equals(self, other, checkWithFileSystem=False):
pass
# A FileContent that refers to the content of a specific file
class FileBacked_FileContent(FileContent):
def __init__(self, referencePath):
super(FileBacked_FileContent, self).__init__()
self.referencePath = referencePath
self.isLink = os.path.islink(self.referencePath)
def apply(self, filePath):
fileIo.copyFile(self.referencePath, filePath)
def equals(self, other, checkWithFileSystem=False):
if not isinstance(other, FileBacked_FileContent):
return False
if self.referencePath == other.referencePath:
return True
if not checkWithFileSystem:
return False
if self.isLink and other.isLink:
return os.readlink(self.referencePath) == os.readlink(other.referencePath)
if self.isLink != other.isLink:
return False # symlink not equal to non-symlink
return filecmp.cmp(self.referencePath, other.referencePath)
def __str__(self):
return self.referencePath
# A FileContent describing the nonexistence of a file
class MissingFile_FileContent(FileContent):
def __init__(self):
super(MissingFile_FileContent, self).__init__()
def apply(self, filePath):
fileIo.removePath(filePath)
def equals(self, other, checkWithFileSystem=False):
return isinstance(other, MissingFile_FileContent)
def __str__(self):
return "Empty"
# A FileContent describing a directory
class Directory_FileContent(FileContent):
def __init__(self):
super(Directory_FileContent, self).__init__()
def apply(self, filePath):
fileIo.ensureDirExists(filePath)
def equals(self, other, checkWithFileSystem=False):
return isinstance(other, Directory_FileContent)
def __str__(self):
return "[empty dir]"
# A collection of many FileContent objects
class FilesState(object):
def __init__(self):
self.fileStates = OrderedDict()
def apply(self, filePath):
for relPath, state in self.fileStates.iteritems():
state.apply(fileIo.join(filePath, relPath))
def add(self, filePath, fileContent):
self.fileStates[filePath] = fileContent
def addAllFrom(self, other):
for filePath in other.fileStates:
self.add(filePath, other.fileStates[filePath])
def getContent(self, filePath):
if filePath in self.fileStates:
return self.fileStates[filePath]
return None
# returns a FilesState resembling <self> but without the keys for which other[key] == self[key]
def withoutDuplicatesFrom(self, other, checkWithFileSystem=False):
result = FilesState()
for filePath, fileState in self.fileStates.iteritems():
otherContent = other.getContent(filePath)
if not fileState.equals(otherContent, checkWithFileSystem):
result.add(filePath, fileState)
return result
# returns self[fromIndex:toIndex]
def slice(self, fromIndex, toIndex):
result = FilesState()
for filePath in self.fileStates.keys()[fromIndex:toIndex]:
result.fileStates[filePath] = self.fileStates[filePath]
return result
def restrictedToKeysIn(self, other):
result = FilesState()
for filePath, fileState in self.fileStates.iteritems():
if filePath in other.fileStates:
result.add(filePath, fileState)
return result
# returns a FilesState having the same keys as this FilesState, but with values taken from <other> when it has them, and <self> otherwise
def withConflictsFrom(self, other, listEmptyDirs = False):
result = FilesState()
for filePath, fileContent in self.fileStates.iteritems():
if filePath in other.fileStates:
result.add(filePath, other.fileStates[filePath])
else:
result.add(filePath, fileContent)
if listEmptyDirs:
oldImpliedDirs = self.listImpliedDirs()
newImpliedDirs = result.listImpliedDirs()
for impliedDir in oldImpliedDirs:
if impliedDir not in newImpliedDirs and impliedDir not in result.fileStates:
result.add(impliedDir, MissingFile_FileContent())
return result
def checkSameKeys(self, other):
a = self.checkContainsKeys(other)
b = other.checkContainsKeys(self)
if a and b:
return True
if not a:
print("a does not contain all of the keys from b")
if not b:
print("b does not contain all of the keys from a")
return False
def checkContainsKeys(self, other):
contains = True
for f in other.fileStates.keys():
if f not in self.fileStates:
print("Found in " + other.summarize() + " but not in " + self.summarize() + ": " + f)
contains = False
return contains
# returns a set of paths to all of the dirs in <self> that are implied by any files in <self>
def listImpliedDirs(self):
dirs = set()
empty = MissingFile_FileContent()
keys = [key for (key, value) in self.fileStates.iteritems() if not empty.equals(value)]
i = 0
while i < len(keys):
path = keys[i]
parent, child = os.path.split(path)
if parent == "":
parent = "."
if not parent in dirs:
dirs.add(parent)
keys.append(parent)
i += 1
return dirs
# returns a FilesState having all of the entries from <self>, plus empty entries for any keys in <other> not in <self>
def expandedWithEmptyEntriesFor(self, other):
impliedDirs = self.listImpliedDirs()
# now look for entries in <other> not present in <self>
result = self.clone()
for filePath in other.fileStates:
if filePath not in result.fileStates and filePath not in impliedDirs:
result.fileStates[filePath] = MissingFile_FileContent()
return result
def clone(self):
result = FilesState()
for path, content in self.fileStates.iteritems():
result.add(path, content)
return result
def withoutEmptyEntries(self):
result = FilesState()
empty = MissingFile_FileContent()
for path, state in self.fileStates.iteritems():
if not empty.equals(state):
result.add(path, state)
return result
def getCommonDir(self):
result = fileIo.commonPrefix(self.fileStates.keys())
return result
# Returns a list of FilesState objects each containing a different subdirectory of <self>
# If groupDirectFilesTogether == True, then all files directly under self.getCommonDir() will be assigned to the same group
def groupByDirs(self, groupDirectFilesTogether = False):
if len(self.fileStates) <= 1:
if len(self.fileStates) == 1:
return [self]
return []
commonDir = self.getCommonDir()
if commonDir is None:
prefixLength = 0
else:
prefixLength = len(commonDir) + 1 # skip the following '/'
groupsByDir = {}
for filePath, fileContent in self.fileStates.iteritems():
subPath = filePath[prefixLength:]
slashIndex = subPath.find("/")
if slashIndex < 0:
if groupDirectFilesTogether:
firstDir = ""
else:
firstDir = subPath
else:
firstDir = subPath[:slashIndex]
if not firstDir in groupsByDir:
groupsByDir[firstDir] = FilesState()
groupsByDir[firstDir].add(filePath, fileContent)
return [group for group in groupsByDir.values()]
# splits into multiple, smaller, FilesState objects
def splitOnce(self, maxNumChildren = 2):
if self.size() <= 1:
return [self]
children = self.groupByDirs(True)
if len(children) == 1:
children = children[0].groupByDirs(False)
if len(children) > maxNumChildren:
# If there are lots of child directories, we still want to test a smaller number of larger groups before testing smaller groups
# So we arbitrarily recombine child directories to make a smaller number of children
minIndex = 0
mergedChildren = []
for i in range(maxNumChildren):
maxIndex = len(children) * (i + 1) / maxNumChildren
merge = FilesState()
for child in children[minIndex:maxIndex]:
merge.addAllFrom(child)
mergedChildren.append(merge)
minIndex = maxIndex
children = mergedChildren
return children
def summarize(self):
numFiles = self.size()
commonDir = self.getCommonDir()
if numFiles <= 4:
return str(self)
if commonDir is not None:
return str(numFiles) + " files under " + str(commonDir)
return str(numFiles) + " files"
def size(self):
return len(self.fileStates)
def __str__(self):
if len(self.fileStates) == 0:
return "[empty fileState]"
entries = []
for filePath, state in self.fileStates.iteritems():
entries.append(filePath + " -> " + str(state))
if len(self.fileStates) > 1:
prefix = str(len(entries)) + " entries:\n"
else:
prefix = "1 entry: "
return prefix + "\n".join(entries)
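# Illustrative sketch (added example, never called by this script): how the FilesState
# set-algebra helpers above compose. Only placeholder FileContent objects are used, so
# nothing touches the filesystem.
def _demoFilesStateAlgebra():
    a = FilesState()
    a.add("x/1.txt", MissingFile_FileContent())
    a.add("x/2.txt", Directory_FileContent())
    b = FilesState()
    b.add("x/1.txt", MissingFile_FileContent())
    # entries whose content matches in both states are dropped
    assert a.withoutDuplicatesFrom(b).size() == 1
    # keys come from <a>, values from <b> wherever <b> has the same key
    merged = a.withConflictsFrom(b)
    assert sorted(merged.fileStates.keys()) == ["x/1.txt", "x/2.txt"]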
# Creates a FilesState matching the state of a directory on disk
def filesStateFromTree(rootPath):
rootPath = os.path.abspath(rootPath)
paths = []
states = {}
for root, dirPaths, filePaths in os.walk(rootPath, topdown=True):
if len(filePaths) == 0 and len(dirPaths) == 0:
relPath = os.path.relpath(root, rootPath)
paths.append(relPath)
states[relPath] = Directory_FileContent()
# include every file and every symlink (even if the symlink points to a dir)
leaves = filePaths
for dirPath in dirPaths:
fullPath = os.path.join(root, dirPath)
if os.path.islink(fullPath):
leaves.append(dirPath)
for filePath in leaves:
fullPath = fileIo.join(root, filePath)
relPath = os.path.relpath(fullPath, rootPath)
paths.append(relPath)
states[relPath] = FileBacked_FileContent(fullPath)
paths = sorted(paths)
state = FilesState()
for path in paths:
state.add(path, states[path])
return state
class FilesState_HyperBoxNode(object):
def __init__(self, dimensions):
self.dimensions = dimensions
self.children = []
if len(dimensions) > 1:
nextDimensions = dimensions[1:]
for i in range(dimensions[0]):
self.children.append(FilesState_HyperBoxNode(nextDimensions))
else:
for i in range(dimensions[0]):
self.children.append(FilesState_LeafBox())
def getFiles(self, coordinates):
child = self.children[coordinates[0]]
return child.getFiles(coordinates[1:])
def setFiles(self, coordinates, files):
self.children[coordinates[0]].setFiles(coordinates[1:], files)
def clearFiles(self, coordinates):
self.children[coordinates[0]].clearFiles(coordinates[1:])
def removeSlice(self, dimension, index):
if dimension == 0:
del self.children[index]
else:
for child in self.children:
child.removeSlice(dimension - 1, index)
def getSlice(self, dimension, index):
result = FilesState()
for i in range(len(self.children)):
if dimension != 0 or i == index:
child = self.children[i]
childResult = child.getSlice(dimension - 1, index)
result = result.expandedWithEmptyEntriesFor(childResult).withConflictsFrom(childResult)
return result
class FilesState_LeafBox(object):
def __init__(self):
self.files = FilesState()
def getFiles(self, coordinates):
return self.files
def setFiles(self, coordinates, files):
self.files = files
def clearFiles(self, coordinates):
self.files = FilesState()
def removeSlice(self, dimensions, index):
return
def getSlice(self, dimension, index):
return self.getFiles([])
class FilesState_HyperBox(object):
def __init__(self, dimensions):
self.dimensions = dimensions
self.durations = []
self.numFiles = 0
if len(dimensions) < 1:
raise Exception("dimensions must be nonempty: " + str(dimensions))
for length in dimensions:
if length < 1:
raise Exception("Illegal dimension " + str(length) + " in " + str(dimensions))
self.durations.append([None] * length)
self.root = FilesState_HyperBoxNode(dimensions)
def getNumDimensions(self):
return len(self.dimensions)
def getSize(self, dimension):
return self.dimensions[dimension]
def getDimensions(self):
return self.dimensions
def getSliceDuration(self, dimension, index):
return self.durations[dimension][index]
def setSliceDuration(self, dimension, index, value):
durations = self.durations[dimension]
if index >= len(durations):
raise Exception("Index " + str(index) + " too large for durations " + str(durations) + " of length " + str(len(durations)) + ". All durations: " + str(self.durations))
durations[index] = value
def removeSlice(self, dimension, index):
durations = self.durations[dimension]
del durations[index]
self.root.removeSlice(dimension, index)
self.dimensions[dimension] -= 1
def getFastestIndex(self, dimension):
durations = self.durations[dimension]
fastestValue = None
fastestIndex = None
for i in range(len(durations)):
value = durations[i]
if value is not None:
if fastestValue is None or value < fastestValue:
fastestValue = value
fastestIndex = i
return fastestIndex
def getFastestIndices(self):
return [self.getFastestIndex(dimension) for dimension in range(self.getNumDimensions())]
def getFiles(self, coordinates):
return self.root.getFiles(coordinates)
def setFiles(self, dimensions, files):
self.root.setFiles(dimensions, files)
self.numFiles = None
def clearFiles(self, dimensions):
self.setFiles(dimensions, FilesState())
def getNumFiles(self):
if self.numFiles is None:
numFiles = 0
for child in self.getChildren():
numFiles += child.size()
self.numFiles = numFiles
return self.numFiles
def getSlice(self, dimension, index):
return self.root.getSlice(dimension, index)
def incrementCoordinates(self, coordinates):
coordinates = coordinates[:]
for i in range(len(coordinates)):
coordinates[i] += 1
if coordinates[i] >= self.dimensions[i]:
coordinates[i] = 0
else:
return coordinates
return None
def getChildren(self):
if len(self.dimensions) < 1 or self.dimensions[0] < 1:
return []
coordinates = [0] * len(self.dimensions)
children = []
while coordinates is not None:
child = self.getFiles(coordinates)
if child is not None and child.size() > 0:
children.append(child)
coordinates = self.incrementCoordinates(coordinates)
return children
def getNumChildren(self):
return len(self.getChildren())
def getAllFiles(self):
files = FilesState()
for child in self.getChildren():
files = files.expandedWithEmptyEntriesFor(child).withConflictsFrom(child)
return files
def boxFromList(fileStates):
numStates = len(fileStates)
if numStates == 1:
dimensions = [1]
else:
dimensions = []
while numStates > 1:
if numStates == 4:
# if there are 4 states we want to make it a 2x2
nextDimension = 2
else:
nextDimension = min(3, numStates)
dimensions.append(nextDimension)
numStates = int(math.ceil(float(numStates) / float(nextDimension)))
tree = FilesState_HyperBox(dimensions)
coordinates = [0] * len(dimensions)
for state in fileStates:
tree.setFiles(coordinates, state)
coordinates = tree.incrementCoordinates(coordinates)
return tree
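# Illustrative sketch (added example, never called by this script): boxFromList packs a
# flat list of FilesState objects into a small hyper-box; five one-file states end up in
# a 3x2 box with five nonempty cells.
def _demoBoxFromList():
    states = [FilesState() for _ in range(5)]
    for i in range(len(states)):
        states[i].add("f" + str(i), MissingFile_FileContent())
    box = boxFromList(states)
    assert box.getDimensions() == [3, 2]
    assert box.getNumChildren() == 5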
# runs a Job in this process
def runJobInSameProcess(shellCommand, workPath, full_resetTo_state, assumeNoSideEffects, candidateBox, twoWayPipe):
job = Job(shellCommand, workPath, full_resetTo_state, assumeNoSideEffects, candidateBox, twoWayPipe)
job.runAndReport()
# starts a Job in a new process
def runJobInOtherProcess(shellCommand, workPath, full_resetTo_state, assumeNoSideEffects, candidateBox, queue, identifier):
parentWriter, childReader = multiprocessing.Pipe()
childInfo = TwoWayPipe(childReader, queue, identifier)
process = multiprocessing.Process(target=runJobInSameProcess, args=(shellCommand, workPath, full_resetTo_state, assumeNoSideEffects, candidateBox, childInfo,))
process.start()
return parentWriter
class TwoWayPipe(object):
def __init__(self, readerConnection, writerQueue, identifier):
self.readerConnection = readerConnection
self.writerQueue = writerQueue
self.identifier = identifier
# Stores a subprocess for running tests and some information about which tests to run
class Job(object):
def __init__(self, shellCommand, workPath, full_resetTo_state, assumeNoSideEffects, candidateBox, twoWayPipe):
self.shellCommand = shellCommand
self.workPath = workPath
self.full_resetTo_state = full_resetTo_state
self.assumeNoSideEffects = assumeNoSideEffects
# all of the files that we've found so far that we can add
self.acceptedState = FilesState()
# HyperBox of all of the possible changes we're considering
self.candidateBox = candidateBox
# FilesState telling the current set of files that we're testing modifying
self.currentTestState = None
self.busy = False
self.complete = False
self.pipe = twoWayPipe
def runAndReport(self):
succeeded = False
try:
succeeded = self.run()
finally:
print("^" * 100)
self.pipe.writerQueue.put((self.pipe.identifier, succeeded))
def run(self):
print("#" * 100)
print("Checking " + self.candidateBox.summarize() + " (job " + str(self.pipe.identifier) + ") in " + str(self.workPath) + " at " + str(datetime.datetime.now()))
# set file state
if not self.assumeNoSideEffects:
fileIo.removePath(self.workPath)
# If the user told us that we don't have to worry about the possibility of the shell command generating files whose state matters,
# then we don't reset any unrecognized files (they might even be caches that improve speed)
testState = self.candidateBox
self.full_resetTo_state.expandedWithEmptyEntriesFor(testState).withConflictsFrom(testState, True).apply(self.workPath)
# run test
start = datetime.datetime.now()
returnCode = ShellScript(self.shellCommand, self.workPath).process()
now = datetime.datetime.now()
duration = (now - start).total_seconds()
# report results
if returnCode == 0:
print("Passed: " + self.candidateBox.summarize() + " (job " + str(self.pipe.identifier) + ") at " + str(datetime.datetime.now()) + " in " + str(duration))
return True
else:
print("Failed: " + self.candidateBox.summarize() + " (job " + str(self.pipe.identifier) + ") at " + str(datetime.datetime.now()) + " in " + str(duration))
return False
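# Illustrative note (added): each worker reports back by putting a
# (job identifier, succeeded boolean) tuple on the shared multiprocessing queue;
# DiffRunner.run() below consumes these via queue.get().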
# Runner class that determines which diffs between two directories cause the given shell command to fail
class DiffRunner(object):
def __init__(self, failingPath, passingPath, shellCommand, workPath, assumeNoSideEffects, assumeInputStatesAreCorrect, tryFail, maxNumJobsAtOnce):
# some simple params
self.workPath = os.path.abspath(workPath)
self.bestState_path = fileIo.join(self.workPath, "bestResults")
self.sampleFailure_path = fileIo.join(self.workPath, "sampleFailure")
self.testScript_path = fileIo.join(self.workPath, "test.sh")
fileIo.ensureDirExists(os.path.dirname(self.testScript_path))
fileIo.writeScript(self.testScript_path, shellCommand)
self.originalPassingPath = os.path.abspath(passingPath)
self.originalFailingPath = os.path.abspath(failingPath)
self.assumeNoSideEffects = assumeNoSideEffects
self.assumeInputStatesAreCorrect = assumeInputStatesAreCorrect
self.tryFail = tryFail
# lists of all the files under the two dirs
print("Finding files in " + passingPath)
self.originalPassingState = filesStateFromTree(passingPath)
print("Found " + self.originalPassingState.summarize() + " in " + str(passingPath))
print("")
print("Finding files in " + failingPath)
self.originalFailingState = filesStateFromTree(failingPath)
print("Found " + self.originalFailingState.summarize() + " in " + str(failingPath))
print("")
print("Identifying duplicates")
# list of the files in the state to reset to after each test
self.full_resetTo_state = self.originalPassingState
# minimal description of only the files that are supposed to need to be reset after each test
self.resetTo_state = self.originalPassingState.expandedWithEmptyEntriesFor(self.originalFailingState).withoutDuplicatesFrom(self.originalFailingState, True)
self.targetState = self.originalFailingState.expandedWithEmptyEntriesFor(self.originalPassingState).withoutDuplicatesFrom(self.originalPassingState, True)
self.originalNumDifferences = self.resetTo_state.size()
print("Processing " + str(self.originalNumDifferences) + " file differences")
self.maxNumJobsAtOnce = maxNumJobsAtOnce
def cleanupTempDirs(self):
print("Clearing work directories")
if os.path.isdir(self.workPath):
for child in os.listdir(self.workPath):
if child.startswith("job-"):
fileIo.removePath(os.path.join(self.workPath, child))
def runnerTest(self, testState, timeout = None):
workPath = self.getWorkPath(0)
# reset state if needed
fileIo.removePath(workPath)
testState.apply(workPath)
start = datetime.datetime.now()
returnCode = ShellScript(self.testScript_path, workPath).process()
duration = (datetime.datetime.now() - start).total_seconds()
print("shell command completed in " + str(duration))
if returnCode == 0:
return (True, duration)
else:
if self.assumeNoSideEffects:
# unapply changes so that the contents of workPath should match self.resetTo_state
testState.withConflictsFrom(self.resetTo_state).apply(workPath)
return (False, duration)
def onSuccess(self, testState):
#print("Runner received success of testState: " + str(testState.summarize()))
if debug:
if not filesStateFromTree(self.bestState_path).checkSameKeys(self.full_resetTo_state.withoutEmptyEntries()):
print("Contents of " + self.bestState_path + " don't match self.full_resetTo_state at beginning of onSuccess")
sys.exit(1)
self.targetState = self.targetState.withoutDuplicatesFrom(testState)
self.resetTo_state = self.resetTo_state.withConflictsFrom(testState).withoutDuplicatesFrom(testState)
delta = self.full_resetTo_state.expandedWithEmptyEntriesFor(testState).withConflictsFrom(testState, True)
delta.apply(self.bestState_path)
self.full_resetTo_state = self.full_resetTo_state.expandedWithEmptyEntriesFor(delta).withConflictsFrom(delta)
if debug:
if not filesStateFromTree(self.bestState_path).checkSameKeys(self.full_resetTo_state.withoutEmptyEntries()):
print("Contents of " + self.bestState_path + " don't match self.full_resetTo_state at end of onSuccess")
print("Applied this delta: " + str(delta))
sys.exit(1)
def getWorkPath(self, jobId):
return os.path.join(self.workPath, "job-" + str(jobId))
def run(self):
start = datetime.datetime.now()
numIterationsCompleted = 0
self.cleanupTempDirs()
workPath = self.getWorkPath(0)
if not self.assumeInputStatesAreCorrect:
print("Testing that the given failing state actually fails")
fileIo.removePath(workPath)
if self.runnerTest(self.originalFailingState)[0]:
print("\nGiven failing state at " + self.originalFailingPath + " does not actually fail!")
return False
print("Testing that the given passing state actually passes")
if not self.runnerTest(self.full_resetTo_state)[0]:
print("\nGiven passing state at " + self.originalPassingPath + " does not actually pass!")
return False
print("Saving best state found so far")
fileIo.removePath(self.bestState_path)
self.full_resetTo_state.apply(self.bestState_path)
print("Starting")
print("You can inspect " + self.bestState_path + " while this process runs, to observe the best state discovered so far")
print("You can inspect " + self.sampleFailure_path + " while this process runs, to observe a state for which the test failed. If you delete this filepath, then it will be updated later to contain a new failing state")
print("")
# Now we search over groups of inodes (files or dirs) in the tree
# Every time we encounter a group of inodes, we try replacing them and seeing if the replacement passes our test
# If it does, we accept those changes and continue searching
# If it doesn't, we split that group into smaller groups and continue
jobId = 0
workingDir = self.getWorkPath(jobId)
queue = multiprocessing.Queue()
activeTestStatesById = {}
initialSplitSize = 2
if self.maxNumJobsAtOnce != "auto" and self.maxNumJobsAtOnce > 2:
initialSplitSize = self.maxNumJobsAtOnce
availableTestStates = self.targetState.splitOnce(initialSplitSize)
numConsecutiveFailures = 0
numFailuresSinceLastSplitOrSuccess = 0
numCompletionsSinceLastPoolSizeChange = 0
invalidatedIds = set()
probablyAcceptableStates = []
numCompletedTests = 2 # Already tested initial passing state and initial failing state
numJobsAtFirstSuccessAfterMerge = None
# continue until all files fail and no jobs are running
while numFailuresSinceLastSplitOrSuccess < self.resetTo_state.size() or len(activeTestStatesById) > 0:
# display status message
now = datetime.datetime.now()
elapsedDuration = now - start
minNumTestsRemaining = sum([math.log(box.size(), 2) + 1 for box in availableTestStates + activeTestStatesById.values()]) - numFailuresSinceLastSplitOrSuccess
estimatedNumTestsRemaining = max(minNumTestsRemaining, 1)
if numConsecutiveFailures >= 4 and numFailuresSinceLastSplitOrSuccess < 1:
# If we are splitting often and failing often, then we probably haven't yet
# shrunken the individual boxes down to each contain only one failing file
# During this phase, on average we've completed half of the work
# So, we estimate that the total work remaining is double what we've completed
estimatedNumTestsRemaining *= 2
estimatedRemainingDuration = datetime.timedelta(seconds = elapsedDuration.total_seconds() * float(estimatedNumTestsRemaining) / float(numCompletedTests))
message = "Elapsed duration: " + str(elapsedDuration) + ". Waiting for " + str(len(activeTestStatesById)) + " active subprocesses (" + str(len(availableTestStates) + len(activeTestStatesById)) + " total available jobs). " + str(self.resetTo_state.size()) + " changes left to test, should take about " + str(estimatedNumTestsRemaining) + " tests, about " + str(estimatedRemainingDuration)
print(message)
if len(activeTestStatesById) > 0:
# wait for a response from a worker
response = queue.get()
identifier = response[0]
box = activeTestStatesById[identifier]
didAcceptState = response[1]
numCompletedTests += 1
numCompletionsSinceLastPoolSizeChange += 1
if didAcceptState:
numConsecutiveFailures = 0
numFailuresSinceLastSplitOrSuccess = 0
acceptedState = box #.getAllFiles()
#print("Succeeded : " + acceptedState.summarize() + " (job " + str(identifier) + ") at " + str(datetime.datetime.now()))
maxRunningSize = max([state.size() for state in activeTestStatesById.values()])
maxRelevantSize = maxRunningSize / len(activeTestStatesById)
if acceptedState.size() < maxRelevantSize:
print("Queuing a retest of response of size " + str(acceptedState.size()) + " from job " + str(identifier) + " because a much larger job of size " + str(maxRunningSize) + " is still running")
probablyAcceptableStates.append(acceptedState)
else:
if identifier in invalidatedIds:
# queue a retesting of this box
print("Queuing a re-test of response from job " + str(identifier) + " due to previous invalidation. Successful state: " + str(acceptedState.summarize()))
probablyAcceptableStates.append(acceptedState)
else:
# A worker discovered a nonempty change that can be made successfully; update our best accepted state
self.onSuccess(acceptedState)
if debug:
# The files in self.bestState_path should exactly match what's in workPath[identifier], except for files that didn't originally exist
if not filesStateFromTree(self.bestState_path).checkSameKeys(filesStateFromTree(self.getWorkPath(identifier)).restrictedToKeysIn(self.originalPassingState.expandedWithEmptyEntriesFor(self.originalFailingState))):
print("Successful state from work path " + str(identifier) + " wasn't correctly copied to bestState. Could the test command be deleting files that previously existed?")
sys.exit(1)
# record that the results from any previously started process are no longer guaranteed to be valid
for i in activeTestStatesById.keys():
if i != identifier:
invalidatedIds.add(i)
# record our first success
if numJobsAtFirstSuccessAfterMerge is None:
numJobsAtFirstSuccessAfterMerge = len(availableTestStates)
else:
if not os.path.isdir(self.sampleFailure_path):
# save sample failure path where user can see it
print("Saving sample failed state to " + str(self.sampleFailure_path))
fileIo.ensureDirExists(self.sampleFailure_path)
self.full_resetTo_state.expandedWithEmptyEntriesFor(box).withConflictsFrom(box, True).apply(self.sampleFailure_path)
#print("Failed : " + box.summarize() + " (job " + str(identifier) + ") at " + str(datetime.datetime.now()))
# count failures
numConsecutiveFailures += 1
numFailuresSinceLastSplitOrSuccess += 1
# find any children that failed and queue a re-test of those children
updatedChild = box.withoutDuplicatesFrom(box.withConflictsFrom(self.resetTo_state))
if updatedChild.size() > 0:
if numConsecutiveFailures >= 4:
# Suppose we are trying to identify n single-file changes that cause failures
# Suppose we have tried c changes of size s, each one of which failed
# We conclude that n >= c
# A mostly unbiased estimate of c as a function of n is that c = n / 2
# Similarly, a mostly unbiased estimate of n is that n = c * 2
# We want to choose a new number of changes to test, c2, such that running c2 tests results in efficiently identifying the relevant n changes
# Let's set c2 = 2 * n = 2 * 2 * c
splitFactor = 4
else:
# After we reach a sufficiently small change size such that some changes start passing,
# Then we assume that we've probably narrowed down to each individual failing change,
# And we can increase block sizes more slowly
splitFactor = 2
split = updatedChild.splitOnce(splitFactor)
if len(split) > 1:
numFailuresSinceLastSplitOrSuccess = 0
availableTestStates += split
# clear invalidation status
if identifier in invalidatedIds:
invalidatedIds.remove(identifier)
del activeTestStatesById[identifier]
# Check whether we've had enough failures lately to warrant checking for the possibility of dependencies among files
if numJobsAtFirstSuccessAfterMerge is not None:
if len(availableTestStates) > 3 * numJobsAtFirstSuccessAfterMerge:
# It's plausible that every file in one directory depends on every file in another directory
# If this happens, then after we delete the dependent directory, we can delete the dependency directory too
# To make sure that we consider deleting the dependency directory, we recombine all of our states and start splitting from there
print("#############################################################")
print("# #")
print("# Lots of failures since first success!!!!!!!!!!!!!!!!!!!!! #")
print("# Recombining all states in case we uncovered a dependency! #")
print("# #")
print("#############################################################")
rejoinedState = FilesState()
for state in availableTestStates:
rejoinedState = rejoinedState.expandedWithEmptyEntriesFor(state).withConflictsFrom(state)
rejoinedState = rejoinedState.withoutDuplicatesFrom(self.resetTo_state)
availableTestStates = rejoinedState.splitOnce(initialSplitSize)
numFailuresSinceLastSplitOrSuccess = 0
numJobsAtFirstSuccessAfterMerge = None
numCompletionsSinceLastPoolSizeChange = 0
# if probablyAcceptableStates has become large enough, then retest its contents too
if len(probablyAcceptableStates) > 0 and (len(probablyAcceptableStates) >= len(activeTestStatesById) + 1 or numConsecutiveFailures >= len(activeTestStatesById) or len(activeTestStatesById) < 1):
probablyAcceptableState = FilesState()
for state in probablyAcceptableStates:
probablyAcceptableState = probablyAcceptableState.expandedWithEmptyEntriesFor(state).withConflictsFrom(state)
probablyAcceptableState = probablyAcceptableState.withoutDuplicatesFrom(self.resetTo_state)
if probablyAcceptableState.size() > 0:
print("Retesting " + str(len(probablyAcceptableStates)) + " previous likely successful states as a single test: " + probablyAcceptableState.summarize())
availableTestStates = [probablyAcceptableState] + availableTestStates
probablyAcceptableStates = []
if len(availableTestStates) < 1 and len(activeTestStatesById) < 1:
print("Error: no changes remain left to test. It was expected that applying all changes would fail")
break
# if we haven't checked everything yet, then try to queue more jobs
if numFailuresSinceLastSplitOrSuccess < self.resetTo_state.size():
availableTestStates.sort(reverse=True, key=FilesState.size)
if self.maxNumJobsAtOnce != "auto":
targetNumJobs = self.maxNumJobsAtOnce
else:
# If N jobs are running then wait for all N to fail before increasing the number of running jobs
# Recalibrate the number of processes based on the system load
systemUsageStats = psutil.cpu_times_percent(interval=None)
systemIdleFraction = systemUsageStats.idle / 100
if systemIdleFraction >= 0.5:
if numCompletionsSinceLastPoolSizeChange <= len(activeTestStatesById):
# Not much time has passed since the previous time we changed the pool size
targetNumJobs = len(activeTestStatesById) + 1 # just replace existing job
else:
# We've been using less than the target capacity for a while, so add another job
targetNumJobs = len(activeTestStatesById) + 2 # replace existing job and add a new one
numCompletionsSinceLastPoolSizeChange = 0
else:
targetNumJobs = len(activeTestStatesById) # don't replace existing job
numCompletionsSinceLastPoolSizeChange = 0
if targetNumJobs < 1:
targetNumJobs = 1
print("System idle = " + str(systemIdleFraction) + ", current num jobs = " + str(len(activeTestStatesById) + 1) + ", target num jobs = " + str(targetNumJobs))
while len(activeTestStatesById) < targetNumJobs and len(activeTestStatesById) < self.resetTo_state.size() and len(availableTestStates) > 0:
# find next pending job
box = availableTestStates[0]
# find next unused job id
jobId = 0
while jobId in activeTestStatesById:
jobId += 1
# start job
workingDir = self.getWorkPath(jobId)
runJobInOtherProcess(self.testScript_path, workingDir, self.full_resetTo_state, self.assumeNoSideEffects, box, queue, jobId)
activeTestStatesById[jobId] = box
availableTestStates = availableTestStates[1:]
print("double-checking results")
wasSuccessful = True
if not self.runnerTest(filesStateFromTree(self.bestState_path))[0]:
message = "Error: expected best state at " + self.bestState_path + " did not pass the second time. Could the test be non-deterministic?"
if self.assumeNoSideEffects:
message += " (it may help to remove the --assume-no-side-effects flag)"
if self.assumeInputStatesAreCorrect:
message += " (it may help to remove the --assume-input-states-are-correct flag)"
print(message)
wasSuccessful = False
self.cleanupTempDirs()
print("")
if self.targetState.size() < 1000:
filesDescription = str(self.targetState)
else:
filesDescription = str(self.targetState.summarize())
print("Done trying to transform the contents of passing path:\n " + self.originalPassingPath + "\ninto the contents of failing path:\n " + self.originalFailingPath)
print("Of " + str(self.originalNumDifferences) + " differences, could not accept: " + filesDescription)
print("The final accepted state can be seen at " + self.bestState_path)
return wasSuccessful
def main(args):
assumeNoSideEffects = False
assumeInputStatesAreCorrect = False
tryFail = False
workPath = "/tmp/diff-filterer"
maxNumJobsAtOnce = 1
while len(args) > 0:
arg = args[0]
if arg == "--assume-no-side-effects":
assumeNoSideEffects = True
args = args[1:]
continue
if arg == "--assume-input-states-are-correct":
assumeInputStatesAreCorrect = True
args = args[1:]
continue
if arg == "--try-fail":
tryFail = True
args = args[1:]
continue
if arg == "--work-path":
if len(args) < 2:
usage()
workPath = args[1]
args = args[2:]
continue
if arg == "--num-jobs":
if len(args) < 2:
usage()
val = args[1]
if val == "auto":
maxNumJobsAtOnce = val
else:
maxNumJobsAtOnce = int(val)
args = args[2:]
continue
if arg == "--debug":
global debug
debug = True
args = args[1:]
continue
if len(arg) > 0 and arg[0] == "-":
print("Unrecognized argument: '" + arg + "'")
usage()
break
if len(args) != 3:
usage()
passingPath = args[0]
failingPath = args[1]
shellCommand = args[2]
startTime = datetime.datetime.now()
if tryFail:
temp = passingPath
passingPath = failingPath
failingPath = temp
if not os.path.exists(passingPath):
print("Specified passing path " + passingPath + " does not exist")
sys.exit(1)
if not os.path.exists(failingPath):
print("Specified failing path " + failingPath + " does not exist")
sys.exit(1)
success = DiffRunner(failingPath, passingPath, shellCommand, workPath, assumeNoSideEffects, assumeInputStatesAreCorrect, tryFail, maxNumJobsAtOnce).run()
endTime = datetime.datetime.now()
duration = endTime - startTime
if success:
print("Succeeded in " + str(duration))
else:
print("Failed in " + str(duration))
sys.exit(1)
main(sys.argv[1:])
|
test_remote.py
|
import threading
import time
import unittest
from jina.logging import get_logger
from jina.main.parser import set_gateway_parser, set_pea_parser
from jina.peapods.pod import GatewayPod
from jina.peapods.remote import PeaSpawnHelper
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
def test_logging_thread(self):
_event = threading.Event()
logger = get_logger('mytest', event_trigger=_event)
def _print_messages():
while True:
_event.wait()
print(f'thread: {_event.record}')
print(type(_event.record))
_event.clear()
t = threading.Thread(target=_print_messages)
t.daemon = True
t.start()
logger.info('blah, blah')
logger.info('blah, blah, blah')
time.sleep(.1)
logger.warning('warn, warn, warn')
time.sleep(.1)
logger.debug('warn, warn, warn')
time.sleep(.1)
logger.success('crit')
time.sleep(.1)
def tearDown(self) -> None:
time.sleep(2)
super().tearDown()
def test_remote_not_allowed(self):
f_args = set_gateway_parser().parse_args([])
p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-grpc', str(f_args.port_grpc)])
with GatewayPod(f_args):
PeaSpawnHelper(p_args).start()
def test_cont_gateway(self):
f1_args = set_gateway_parser().parse_args(['--allow-spawn'])
f2_args = set_gateway_parser().parse_args([])
with GatewayPod(f1_args):
pass
with GatewayPod(f2_args):
pass
if __name__ == '__main__':
unittest.main()
|
Faster_RCNN_data_multiprocessing.py
|
from multiprocessing import Process, Manager
from PIL import Image, ImageDraw
from tqdm import tqdm
import tensorflow as tf
import numpy as np
import scipy.misc
import random
import math
import json
import sys
import cv2
import os
class Data(object):
def __init__(self, dataset_root, proposal_root, COCO_name_file):
self.dataset_root = dataset_root
self.proposal_root = proposal_root
self.COCO_names = self.get_COCO_names(COCO_name_file)
self.anchor_scales = [64, 128, 256, 512]
self.anchor_ratios = [1., 0.5, 2.]
self.sizes = [(int(s * math.sqrt(r)), int(s * math.sqrt(1/r))) for s in self.anchor_scales for r in self.anchor_ratios]
self.data, self.img_path, self.xml_path, self.proposal_path = self.collect_data()
self.num = len(self.data)
self.start = 0
self.end = 0
def Hprint(self, content):
print('\033[1;36;40m')
print(content)
print('\033[0m')
def IOU(self, box1, box2):
#left1, top1, right1, bottom1 = box1
#left2, top2, right2, bottom2 = box2
top1, left1, bottom1, right1 = box1
top2, left2, bottom2, right2 = box2
area1 = (right1 - left1) * (bottom1 - top1)
area2 = (right2 - left2) * (bottom2 - top2)
left = np.maximum(left1, left2)
right = np.minimum(right1, right2)
top = np.maximum(top1, top2)
bottom = np.minimum(bottom1, bottom2)
intersection = np.maximum(0, (right - left)) * np.maximum(0, (bottom - top))
union = area1 + area2 - intersection
iou = intersection.astype(np.float32) / union
return iou
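    # Illustrative example (added): boxes are (top, left, bottom, right). For (0, 0, 10, 10)
    # and (5, 5, 15, 15) the overlap is a 5x5 patch, so IOU = 25 / (100 + 100 - 25) = 1/7 ~ 0.143.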
def get_COCO_names(self, COCO_name_file):
with open(COCO_name_file) as f:
data = f.readline().strip()
jsondata = json.loads(data)
return jsondata
def read_image(self, start, end, xml_files, image_files, proposal_files, data):
for i in tqdm(range(start,end)):
f = xml_files[i]
name = '.'.join(f.split('.')[:-1])
xml_path = os.path.join(self.dataset_root, f)
image_path = os.path.join(self.dataset_root, name+'.jpg')
proposal_path = os.path.join(self.proposal_root, name + '.txt')
img = cv2.imread(image_path)
if img.shape != (640,640,3):
img = cv2.resize(img, (640,640))
data[i] = img
xml_files[i] = xml_path
image_files[i] = image_path
proposal_files[i] = proposal_path
def collect_data(self, workers = 8):
files = os.listdir(self.dataset_root)
files = [f for f in files if '.xml' in f]
num = len(files)
interval = num // workers
xml_files = Manager().list(files)
image_files = Manager().list([None for _ in range(num)])
proposal_files = Manager().list([None for _ in range(num)])
data = Manager().list([None for _ in range(num)])
starts, ends = [], []
self.Hprint('Collecting data ... ...')
for i in range(workers):
starts.append(i*interval)
ends.append((i+1)*interval)
ends[-1] = num
processes = []
for i in range(workers):
p = Process(target = self.read_image, args = (starts[i], ends[i], xml_files, image_files, proposal_files, data))
p.start()
processes.append(p)
for p in processes:
p.join()
return data, image_files, xml_files, proposal_files
'''
def collect_data(self):
data = []
img_path = []
xml_paths = []
proposal_paths = []
files = os.listdir(self.dataset_root)
for i in tqdm(range(len(files))):
f = files[i]
name, ext = f.split('.')
if ext == 'xml':
xml_path = os.path.join(self.dataset_root, f)
image_path = os.path.join(self.dataset_root, name+'.jpg')
proposal_path = os.path.join(self.proposal_root, name + '.txt')
img = cv2.imread(image_path)
if img.shape != (640,640,3):
img = cv2.resize(img, (640,640))
data.append(img)
img_path.append(image_path)
xml_paths.append(xml_path)
proposal_paths.append(proposal_path)
return data, img_path, xml_paths, proposal_paths
'''
def generate_anchors(self, img):
img_h, img_w, img_c = img.shape
img_h_rescale, img_w_rescale = img_h//16, img_w//16
x_coords = np.ones([img_h_rescale,img_w_rescale])
y_coords = np.ones([img_h_rescale,img_w_rescale])
if img_w_rescale < len(range(0, img_w, 16)):
x_coords = x_coords * np.array(list(range(0, img_w, 16))[:-1]) + 8
else:
x_coords = x_coords * np.array(list(range(0, img_w, 16))) + 8
if img_h_rescale < len(range(0, img_h, 16)):
y_coords = y_coords.T * np.array(list(range(0, img_h, 16))[:-1]) + 8
y_coords = y_coords.T
else:
y_coords = y_coords.T * np.array(list(range(0, img_h, 16))) + 8
y_coords = y_coords.T
all_anchors_x = []
all_anchors_y = []
all_anchors_w = []
all_anchors_h = []
for (w,h) in self.sizes:
all_anchors_x.append(x_coords)
all_anchors_y.append(y_coords)
all_anchors_w.append(np.ones(x_coords.shape) * w)
all_anchors_h.append(np.ones(x_coords.shape) * h)
all_anchors_x = np.stack(all_anchors_x, axis=-1)
all_anchors_y = np.stack(all_anchors_y, axis=-1)
all_anchors_w = np.stack(all_anchors_w, axis=-1)
all_anchors_h = np.stack(all_anchors_h, axis=-1)
return all_anchors_x, all_anchors_y, all_anchors_w, all_anchors_h
def get_bboxes(self, img, xml_path):
img_h, img_w, img_c = img.shape
bboxes = []
with open(xml_path) as f:
line = f.readline().strip()
while line:
if '</name>' in line:
cls = line[6:-7]
elif '</xmin>' in line:
left = int(float(line[6:-7]))
if left < 0:
left = 0
elif '</ymin>' in line:
top = int(float(line[6:-7]))
if top < 0:
top = 0
elif '</xmax>' in line:
right = int(float(line[6:-7]))
if right > img_w:
right = img_w
elif '</ymax>' in line:
bottom = int(float(line[6:-7]))
if bottom > img_h:
bottom = img_h
x,y,w,h = (left + right) // 2, (top + bottom) // 2, right-left, bottom-top
bboxes.append([cls, x, y, w, h])
line = f.readline().strip()
return bboxes
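# For reference, get_bboxes assumes Pascal-VOC style annotations with one tag per
# line; an illustrative (made-up) fragment it can parse:
#   <name>person</name>
#   <xmin>48.0</xmin>
#   <ymin>240.0</ymin>
#   <xmax>195.0</xmax>
#   <ymax>371.0</ymax>
# Each object becomes [class_name, center_x, center_y, width, height], clamped to
# the image bounds.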
def get_proposals(self, proposal_path):
proposals = []
with open(proposal_path) as f:
line = f.readline().strip()
while line:
y1, x1, y2, x2 = line.split(',')
y1, x1, y2, x2 = float(y1), float(x1), float(y2), float(x2)
proposals.append([y1,x1,y2,x2])
line = f.readline().strip()
proposals = np.array(proposals)
return proposals
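# Each proposal file is expected to hold one region per line as "y1,x1,y2,x2"
# (top, left, bottom, right), e.g. a made-up line:
#   12.0,34.0,256.0,300.0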
def get_batch(self, batch_size = 64):
while True:
# Wrap around to the first image once the end of the dataset is reached.
if self.start + 1 > self.num: self.start = 0
self.end = self.start + 1
batch = self.data[self.start:self.end]
batch_xml = self.xml_path[self.start:self.end]
batch_proposal = self.proposal_path[self.start:self.end]
img = batch[0] / 255.
img = img.flatten()
img = img[np.newaxis, :]
all_anchors_x, all_anchors_y, all_anchors_w, all_anchors_h = self.generate_anchors(batch[0])
bboxes = self.get_bboxes(batch[0], batch_xml[0])
self.start += 1
all_GT_cls = [self.COCO_names[x[0]] for x in bboxes]
all_GT_x = [x[1] for x in bboxes]
all_GT_y = [x[2] for x in bboxes]
all_GT_w = [x[3] for x in bboxes]
all_GT_h = [x[4] for x in bboxes]
all_GT_cls = np.array(all_GT_cls)
all_GT_x = np.array(all_GT_x)
all_GT_y = np.array(all_GT_y)
all_GT_w = np.array(all_GT_w)
all_GT_h = np.array(all_GT_h)
all_GT_y1 = all_GT_y - all_GT_h/2
all_GT_x1 = all_GT_x - all_GT_w/2
all_GT_y2 = all_GT_y + all_GT_h/2
all_GT_x2 = all_GT_x + all_GT_w/2
all_GT_boxes = np.vstack([all_GT_y1, all_GT_x1, all_GT_y2, all_GT_x2]) # shape is (4, num_GT_boxes): one [y1, x1, y2, x2] column per ground-truth box
#all_GT_boxes = all_GT_boxes.T
#print('all_GT_boxes: ', all_GT_boxes, all_GT_boxes.shape)
proposals = self.get_proposals(batch_proposal[0]) # [y1,x1,y2,x2] i.e. [top, left, bottom, right]
unsort_P = []
pos_num = 0
for proposal in proposals:
iou = self.IOU(proposal, all_GT_boxes) # shape is (num_GT_boxes, ): one IOU per ground-truth box
#print('iou: ', iou, iou.shape)
iou_max_index = np.argmax(iou)
box_max = all_GT_boxes[:, iou_max_index] # shape is (4, ), i.e. [y1, x1, y2, x2]
box_max_y1, box_max_x1, box_max_y2, box_max_x2 = box_max
#print('box_max: ', box_max, box_max.shape)
max_iou = np.max(iou)
cls_max = all_GT_cls[iou_max_index]
if max_iou < 0.5:
cls_max = 0
else:
pos_num += 1
#print('IOU: ', iou, iou_max_index, cls_max)
unsort_P.append([max_iou, cls_max, proposal, box_max_y1, box_max_x1, box_max_y2, box_max_x2])
sort_P = sorted(unsort_P, key = lambda x:x[0])[::-1]
#print('sort_P: ', sort_P)
batch_data_use = sort_P[:batch_size]
random.shuffle(batch_data_use)
proposals_in_batch = [x[2] for x in batch_data_use]
proposals_in_batch = np.vstack(proposals_in_batch)
classes_in_batch = [x[1] for x in batch_data_use]
classes_in_batch_onehot = np.eye(81)[classes_in_batch]
box_max_y1_in_batch = [x[3] for x in batch_data_use]
box_max_y1_in_batch = np.array(box_max_y1_in_batch) # shape is (64, ), i.e. (batch_size, )
box_max_x1_in_batch = [x[4] for x in batch_data_use]
box_max_x1_in_batch = np.array(box_max_x1_in_batch) # shape is (64, ), i.e. (batch_size, )
box_max_y2_in_batch = [x[5] for x in batch_data_use]
box_max_y2_in_batch = np.array(box_max_y2_in_batch) # shape is (64, ), i.e. (batch_size, )
box_max_x2_in_batch = [x[6] for x in batch_data_use]
box_max_x2_in_batch = np.array(box_max_x2_in_batch) # shape is (64, ), i.e. (batch_size, )
#print('proposals_in_batch: ', proposals_in_batch, proposals_in_batch.shape, type(proposals_in_batch[0]))
#print('classes_in_batch_onehot: ', classes_in_batch_onehot, classes_in_batch_onehot.shape)
#print('box_max_y1_in_batch: ', box_max_y1_in_batch, type(box_max_y1_in_batch), box_max_y1_in_batch.shape)
yield batch_xml, img, proposals_in_batch, classes_in_batch_onehot, box_max_y1_in_batch, box_max_x1_in_batch, box_max_y2_in_batch, box_max_x2_in_batch, pos_num
#yield batch_xml, img, proposals , all_anchors_x, all_anchors_y, all_anchors_w, all_anchors_h, all_GT_cls, all_GT_x, all_GT_y, all_GT_w, all_GT_h,all_GT_boxes
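# A standalone sanity check (a sketch, not part of the original pipeline) of the box
# convention used by Data.IOU: boxes are [top, left, bottom, right], and the second
# argument may be a (4, N) batch of ground-truth boxes, giving N IOU values.
def _iou_convention_demo():
    import numpy as np
    box = np.array([0, 0, 10, 10])                        # 10x10 proposal at the origin
    gts = np.array([[0, 5], [0, 5], [10, 15], [10, 15]])  # two GT boxes as a (4, 2) array
    ious = Data.IOU(None, box, gts)                       # IOU never touches self, so None works
    print(ious)                                           # -> [1.0, 0.14285714...]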
# Debug helper: draw rectangles on an image and save it for visual inspection.
def draw_rect(img, boxes, save_path):
pil_img = Image.fromarray(img)
draw = ImageDraw.Draw(pil_img)
for box in boxes:
# Boxes are [y1, x1, y2, x2], i.e. [top, left, bottom, right].
box_y1, box_x1, box_y2, box_x2 = box
left = int(box_x1)
right = int(box_x2)
top = int(box_y1)
bottom = int(box_y2)
draw.rectangle(((left, top), (right, bottom)), outline=(random.randint(0,255), random.randint(0,255), random.randint(0,255)))
del draw
pil_img.save(save_path)
if __name__ == '__main__':
save_folder = '/root/Faster_RCNN_tensorflow/dataset/debug_draw/'
data = Data('/root/Faster_RCNN_tensorflow/dataset/test_for_resize/', '/root/Faster_RCNN_tensorflow/dataset/trainval_resize_proposals_VGGpretrain_step2/', '/root/Faster_RCNN_tensorflow/COCO_names.names')
gen = data.get_batch(32)
for i in range(len(os.listdir('/root/Faster_RCNN_tensorflow/dataset/trainval_resize_proposals_VGGpretrain_step2/'))):
batch_xml, img, proposals_in_batch, classes_in_batch_onehot, box_max_y1_in_batch, box_max_x1_in_batch, box_max_y2_in_batch, box_max_x2_in_batch, pos_num = gen.__next__()
print('box_max_y1_in_batch: ', box_max_y1_in_batch, box_max_y1_in_batch.shape)
print('box_max_x1_in_batch: ', box_max_x1_in_batch, box_max_x1_in_batch.shape)
print('proposals: ', proposals_in_batch, proposals_in_batch.shape)
'''
print('positive number: ', pos_num)
print('batch_xml: ', batch_xml)
print('proposals: ', proposals_in_batch, proposals_in_batch.shape)
xml_path = batch_xml[0]
basename = os.path.basename(xml_path)
name, _ = basename.split('.')
image_path = os.path.join('/root/Faster_RCNN_tensorflow/dataset/test_for_resize/', name+'.jpg')
save_path = os.path.join(save_folder, name+'.jpg')
print('SAVE PATH: ', save_path)
img = scipy.misc.imread(image_path)
draw_rect(img, proposals_in_batch, save_path)
'''
'''
with open('positive_number.log', 'a') as log:
log.write(str(pos_num))
log.write('\n')
'''
break
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Re-encapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
try :
from commands import getoutput
except :
from subprocess import getoutput
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
import binascii
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
Oo0o = [ None , None , None ]
OOO0o0o = None
Ii1iI = None
Oo = None
I1Ii11I1Ii1i = None
Ooo = lisp . lisp_get_ephemeral_port ( )
o0oOoO00o = None
i1 = None
oOOoo00O0O = None
if 15 - 15: I1IiiI
O0ooo00OOo00 = [ ]
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
iIiiI1 = None
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
oo0Ooo0 = ( os . getenv ( "LISP_RTR_FAST_DATA_PLANE" ) != None )
I1I11I1I1I = ( os . getenv ( "LISP_RTR_LATENCY_DEBUG" ) != None )
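# The two environment switches read above are opt-in knobs: LISP_RTR_FAST_DATA_PLANE
# enables the fast data-plane forwarding path and LISP_RTR_LATENCY_DEBUG enables
# per-packet latency logging. Any value turns them on, e.g. (illustrative; how the
# RTR process itself is launched is deployment specific):
#   export LISP_RTR_FAST_DATA_PLANE=1
#   export LISP_RTR_LATENCY_DEBUG=1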
if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
def O00oooo0O ( parameter ) :
global O0ooo00OOo00
if 22 - 22: OoooooooOO % I11i - iII111i . iIii1I11I1II1 * i11iIiiIii
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" ,
O0ooo00OOo00 ) )
if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
if 61 - 61: ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
def Ii1IOo0o0 ( parameter ) :
global O0ooo00OOo00
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" , O0ooo00OOo00 ,
True ) )
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
def o00oOO0 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "RTR" ) )
if 95 - 95: OOooOOo / OoooooooOO
if 18 - 18: i11iIiiIii
if 46 - 46: i1IIi / I11i % OOooOOo + I1Ii111
if 79 - 79: I1Ii111 - o0oOOo0O0Ooo + I1Ii111 - iII111i
if 8 - 8: I1IiiI
if 75 - 75: iIii1I11I1II1 / OOooOOo % o0oOOo0O0Ooo * OoOoOO00
if 9 - 9: OoO0O00
def i11 ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
def O0O0O ( kv_pair ) :
oO0Oo = { "rloc-probe" : False , "igmp-query" : False }
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
for O0o0 in kv_pair . keys ( ) :
OO00Oo = kv_pair [ O0o0 ]
if 51 - 51: IiII * o0oOOo0O0Ooo + I11i + OoO0O00
if ( O0o0 == "instance-id" ) :
o0O0O00 = OO00Oo . split ( "-" )
oO0Oo [ "instance-id" ] = [ 0 , 0 ]
if ( len ( o0O0O00 ) == 1 ) :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 0 ] )
else :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 1 ] )
if 86 - 86: I11i / IiII % i11iIiiIii
if 7 - 7: ooOoO0o * OoO0O00 % oO0o . IiII
if ( O0o0 == "eid-prefix" ) :
Ii1iIiII1ii1 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Ii1iIiII1ii1 . store_prefix ( OO00Oo )
oO0Oo [ "eid-prefix" ] = Ii1iIiII1ii1
if 62 - 62: iIii1I11I1II1 * OoOoOO00
if ( O0o0 == "group-prefix" ) :
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
i1OOO . store_prefix ( OO00Oo )
oO0Oo [ "group-prefix" ] = i1OOO
if 59 - 59: II111iiii + OoooooooOO * OoOoOO00 + i1IIi
if ( O0o0 == "rloc-prefix" ) :
Oo0OoO00oOO0o = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Oo0OoO00oOO0o . store_prefix ( OO00Oo )
oO0Oo [ "rloc-prefix" ] = Oo0OoO00oOO0o
if 80 - 80: oO0o + OOooOOo - OOooOOo % iII111i
if ( O0o0 == "rloc-probe" ) :
oO0Oo [ "rloc-probe" ] = ( OO00Oo == "yes" )
if 63 - 63: I1IiiI - I1ii11iIi11i + O0 % I11i / iIii1I11I1II1 / o0oOOo0O0Ooo
if ( O0o0 == "igmp-query" ) :
oO0Oo [ "igmp-query" ] = ( OO00Oo == "yes" )
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
for o000O0o in lisp . lisp_glean_mappings :
if ( o000O0o . has_key ( "eid-prefix" ) ^ oO0Oo . has_key ( "eid-prefix" ) ) : continue
if ( o000O0o . has_key ( "eid-prefix" ) and oO0Oo . has_key ( "eid-prefix" ) ) :
iI1iII1 = o000O0o [ "eid-prefix" ]
oO0OOoo0OO = oO0Oo [ "eid-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
if 21 - 21: I1IiiI * iIii1I11I1II1
if ( o000O0o . has_key ( "group-prefix" ) ^ oO0Oo . has_key ( "group-prefix" ) ) :
continue
if 91 - 91: IiII
if ( o000O0o . has_key ( "group-prefix" ) and oO0Oo . has_key ( "group-prefix" ) ) :
iI1iII1 = o000O0o [ "group-prefix" ]
oO0OOoo0OO = oO0Oo [ "group-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 15 - 15: II111iiii
if 18 - 18: i11iIiiIii . i1IIi % OoooooooOO / O0
if ( o000O0o . has_key ( "rloc-prefix" ) ^ oO0Oo . has_key ( "rloc-prefix" ) ) : continue
if ( o000O0o . has_key ( "rloc-prefix" ) and oO0Oo . has_key ( "rloc-prefix" ) ) :
iI1iII1 = o000O0o [ "rloc-prefix" ]
oO0OOoo0OO = oO0Oo [ "rloc-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 75 - 75: OoOoOO00 % o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1Ii111
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if ( o000O0o . has_key ( "instance-id" ) ^ oO0Oo . has_key ( "instance-id" ) ) : continue
if ( o000O0o . has_key ( "instance-id" ) and oO0Oo . has_key ( "instance-id" ) ) :
iI1iII1 = o000O0o [ "instance-id" ]
oO0OOoo0OO = oO0Oo [ "instance-id" ]
if ( iI1iII1 != oO0OOoo0OO ) : continue
if 91 - 91: O0
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
return
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
lisp . lisp_glean_mappings . append ( oO0Oo )
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
if 1 - 1: iIii1I11I1II1 / II111iiii
def iiI1I11i1i ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "RTR" ) )
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
def IIIII11I1IiI ( mc , parms ) :
i1I , Oo0OoO00oOO0o , OoOO , ooOOO0 = parms
if 65 - 65: O0
oO00OOoO00 = "{}:{}" . format ( Oo0OoO00oOO0o . print_address_no_iid ( ) , OoOO )
Ii1iIiII1ii1 = lisp . green ( mc . print_eid_tuple ( ) , False )
IiI111111IIII = "Changed '{}' translated address:port to {} for EID {}, {} {}" . format ( ooOOO0 , lisp . red ( oO00OOoO00 , False ) , Ii1iIiII1ii1 , "{}" , "{}" )
if 37 - 37: I1Ii111 / OoOoOO00
if 23 - 23: O0
for o00oO0oOo00 in mc . rloc_set :
if ( o00oO0oOo00 . rle ) :
for oO0oOo0 in o00oO0oOo00 . rle . rle_nodes :
if ( oO0oOo0 . rloc_name != ooOOO0 ) : continue
oO0oOo0 . store_translated_rloc ( Oo0OoO00oOO0o , OoOO )
I1I1I = oO0oOo0 . address . print_address_no_iid ( ) + ":" + str ( oO0oOo0 . translated_port )
if 95 - 95: II111iiii + o0oOOo0O0Ooo + iII111i * iIii1I11I1II1 % oO0o / IiII
lisp . lprint ( IiI111111IIII . format ( "RLE" , I1I1I ) )
if 56 - 56: iII111i
if 86 - 86: II111iiii % I1Ii111
if 15 - 15: i1IIi * I1IiiI + i11iIiiIii
if ( o00oO0oOo00 . rloc_name != ooOOO0 ) : continue
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
I1I1I = o00oO0oOo00 . rloc . print_address_no_iid ( ) + ":" + str ( o00oO0oOo00 . translated_port )
if 25 - 25: OoO0O00
if ( lisp . lisp_crypto_keys_by_rloc_encap . has_key ( I1I1I ) ) :
oOo0oO = lisp . lisp_crypto_keys_by_rloc_encap [ I1I1I ]
lisp . lisp_crypto_keys_by_rloc_encap [ oO00OOoO00 ] = oOo0oO
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
if 78 - 78: i11iIiiIii / iII111i - Ii1I / OOooOOo + oO0o
if 82 - 82: Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
o00oO0oOo00 . delete_from_rloc_probe_list ( mc . eid , mc . group )
o00oO0oOo00 . store_translated_rloc ( Oo0OoO00oOO0o , OoOO )
o00oO0oOo00 . add_to_rloc_probe_list ( mc . eid , mc . group )
lisp . lprint ( IiI111111IIII . format ( "RLOC" , I1I1I ) )
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if ( lisp . lisp_rloc_probing ) :
o0oOO000oO0oo = None if ( mc . group . is_null ( ) ) else mc . eid
oOO00O = mc . eid if ( mc . group . is_null ( ) ) else mc . group
lisp . lisp_send_map_request ( i1I , 0 , o0oOO000oO0oo , oOO00O , o00oO0oOo00 )
if 77 - 77: Oo0Ooo - i1IIi - I11i . OoOoOO00
if 39 - 39: II111iiii / ooOoO0o + I1Ii111 / OoOoOO00
if 13 - 13: IiII + O0 + iII111i % I1IiiI / o0oOOo0O0Ooo . IiII
if 86 - 86: oO0o * o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
def OoOOoOooooOOo ( mc , parms ) :
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if ( mc . group . is_null ( ) ) : return ( IIIII11I1IiI ( mc , parms ) )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if ( mc . source_cache == None ) : return ( True , parms )
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
mc . source_cache . walk_cache ( IIIII11I1IiI , parms )
return ( True , parms )
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
def o00oo0 ( sockets , hostname , rloc , port ) :
lisp . lisp_map_cache . walk_cache ( OoOOoOooooOOo ,
[ sockets , rloc , port , hostname ] )
return
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
def o00Oo0oooooo ( sred , packet ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 76 - 76: I11i / OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if ( sred in [ "Send" , "Receive" ] ) :
o0o = binascii . hexlify ( packet [ 0 : 20 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}" . format ( sred , o0o [ 0 : 8 ] , o0o [ 8 : 16 ] ,
o0o [ 16 : 24 ] , o0o [ 24 : 32 ] , o0o [ 32 : 40 ] ) )
elif ( sred in [ "Encap" , "Decap" ] ) :
o0o = binascii . hexlify ( packet [ 0 : 36 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}" . format ( sred , o0o [ 0 : 8 ] , o0o [ 8 : 16 ] , o0o [ 16 : 24 ] , o0o [ 24 : 32 ] , o0o [ 32 : 40 ] ,
# iIii1I11I1II1 / I11i . OoO0O00 - o0oOOo0O0Ooo
o0o [ 40 : 48 ] , o0o [ 48 : 56 ] , o0o [ 56 : 64 ] , o0o [ 64 : 72 ] ) )
if 48 - 48: i1IIi - Ii1I / O0 * OoO0O00
if 71 - 71: I1ii11iIi11i
if 7 - 7: I1ii11iIi11i - I1IiiI . iIii1I11I1II1 - i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
def ii ( dest , mc ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 68 - 68: iII111i - I1IiiI / I1Ii111 / I11i
I11iiii = "miss" if mc == None else "hit!"
lisp . lprint ( "Fast-Lookup {} {}" . format ( dest . print_address ( ) , I11iiii ) )
if 60 - 60: I11i . i1IIi + IiII / o0oOOo0O0Ooo . II111iiii
if 82 - 82: I1ii11iIi11i / I1IiiI % iIii1I11I1II1 / i1IIi - I1IiiI
if 7 - 7: I1Ii111 * OoO0O00 - ooOoO0o + OOooOOo * I1IiiI % OoO0O00
if 15 - 15: OoOoOO00 % I1IiiI * I11i
if 81 - 81: ooOoO0o - iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * I11i
if 20 - 20: oO0o % IiII
if 19 - 19: I1ii11iIi11i % IiII + ooOoO0o / I1Ii111 . ooOoO0o
if 12 - 12: i1IIi + i1IIi - I1ii11iIi11i * Oo0Ooo % Oo0Ooo - II111iiii
def o0O ( ts , msg ) :
global I1I11I1I1I
if 84 - 84: OoO0O00 + i1IIi - II111iiii . I1ii11iIi11i * OoooooooOO + I1IiiI
if ( I1I11I1I1I == False ) : return ( None )
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if ( ts == None ) : return ( time . time ( ) )
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
ts = ( time . time ( ) - ts ) * 1000000
lisp . lprint ( "{}-Latency: {} usecs" . format ( msg , round ( ts , 1 ) ) , "force" )
return ( None )
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
def I1IiIiiIiIII ( a ) :
iIIi = ord ( a [ 0 ] ) << 24 | ord ( a [ 1 ] ) << 16 | ord ( a [ 2 ] ) << 8 | ord ( a [ 3 ] )
return ( iIIi )
if 11 - 11: I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
if 41 - 41: i1IIi - I11i - Ii1I
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
iiI1I1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
ooO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
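# These two preallocated IPv4 lisp_address objects appear to be reusable scratch
# lookup keys for the fast data-plane path below: the first holds the packet's inner
# source EID and the second its inner destination EID, avoiding a per-packet
# allocation.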
if 6 - 6: iIii1I11I1II1 . ooOoO0o % o0oOOo0O0Ooo
def I1Iii1 ( packet ) :
global lisp_map_cache , o0oOoO00o
if 30 - 30: OoooooooOO - OoOoOO00
Ooo00O0o = o0O ( None , "Fast" )
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
I111i1i1111 = 0
IIII1 = None
if ( packet [ 9 ] == '\x11' ) :
if ( packet [ 20 : 22 ] == '\x10\xf6' ) : return ( False )
if ( packet [ 22 : 24 ] == '\x10\xf6' ) : return ( False )
if 10 - 10: I1Ii111 / ooOoO0o + i11iIiiIii / Ii1I
if ( packet [ 20 : 22 ] == '\x10\xf5' or packet [ 22 : 24 ] == '\x10\xf5' ) :
IIII1 = packet [ 12 : 16 ]
I111i1i1111 = packet [ 32 : 35 ]
I111i1i1111 = ord ( I111i1i1111 [ 0 ] ) << 16 | ord ( I111i1i1111 [ 1 ] ) << 8 | ord ( I111i1i1111 [ 2 ] )
if ( I111i1i1111 == 0xffffff ) : return ( False )
o00Oo0oooooo ( "Decap" , packet )
packet = packet [ 36 : : ]
if 74 - 74: OOooOOo + O0 + i1IIi - i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
o00Oo0oooooo ( "Receive" , packet )
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
ii1 = I1IiIiiIiIII ( packet [ 16 : 20 ] )
ooO . instance_id = I111i1i1111
ooO . address = ii1
if 1 - 1: ooOoO0o % iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % I1IiiI
if 89 - 89: Ii1I
if 76 - 76: ooOoO0o
if 15 - 15: OOooOOo . I11i + OoooooooOO - OoO0O00
if ( ( ii1 & 0xe0000000 ) == 0xe0000000 ) : return ( False )
if 69 - 69: iIii1I11I1II1 . I1ii11iIi11i % ooOoO0o + iIii1I11I1II1 / O0 / I1ii11iIi11i
if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 75 - 75: IiII . ooOoO0o
if 50 - 50: OoOoOO00
ii1 = ooO
O00o0OO0000oo = lisp . lisp_map_cache . lookup_cache ( ii1 , False )
ii ( ii1 , O00o0OO0000oo )
if ( O00o0OO0000oo == None ) : return ( False )
if 27 - 27: O0
if 79 - 79: o0oOOo0O0Ooo - I11i + o0oOOo0O0Ooo . oO0o
if 28 - 28: i1IIi - iII111i
if 54 - 54: iII111i - O0 % OOooOOo
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if ( IIII1 != None ) :
I11ii1i1 = I1IiIiiIiIII ( packet [ 12 : 16 ] )
iiI1I1 . instance_id = I111i1i1111
iiI1I1 . address = I11ii1i1
ooo0OoOOOOO = lisp . lisp_map_cache . lookup_cache ( iiI1I1 , False )
if ( ooo0OoOOOOO == None ) :
i1iIi1iI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( iiI1I1 , None ,
None )
if ( i1iIi1iI ) : return ( False )
elif ( ooo0OoOOOOO . gleaned ) :
IIII1 = I1IiIiiIiIII ( IIII1 )
if ( ooo0OoOOOOO . rloc_set [ 0 ] . rloc . address != IIII1 ) : return ( False )
if 57 - 57: OoOoOO00 - I1ii11iIi11i
if 50 - 50: I1Ii111 / i1IIi % OoO0O00 . I1IiiI / iII111i
if 88 - 88: OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . I11i
if 10 - 10: o0oOOo0O0Ooo * Oo0Ooo % O0 * iIii1I11I1II1 . O0 % I1ii11iIi11i
if 44 - 44: II111iiii / iII111i / I11i % II111iiii / i1IIi . Ii1I
O00o0OO0000oo . add_recent_source ( iiI1I1 )
if 59 - 59: OoooooooOO
if 47 - 47: ooOoO0o - I1IiiI / II111iiii
if 12 - 12: OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if ( O00o0OO0000oo . action == lisp . LISP_NATIVE_FORWARD_ACTION and
O00o0OO0000oo . eid . instance_id == 0 ) :
ii1 . instance_id = lisp . lisp_default_secondary_iid
O00o0OO0000oo = lisp . lisp_map_cache . lookup_cache ( ii1 , False )
ii ( ii1 , O00o0OO0000oo )
if ( O00o0OO0000oo == None ) : return ( False )
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if ( O00o0OO0000oo . action != lisp . LISP_NATIVE_FORWARD_ACTION ) :
if ( O00o0OO0000oo . best_rloc_set == [ ] ) : return ( False )
if 7 - 7: OoooooooOO . IiII
ii1 = O00o0OO0000oo . best_rloc_set [ 0 ]
if ( ii1 . state != lisp . LISP_RLOC_UP_STATE ) : return ( False )
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
I111i1i1111 = O00o0OO0000oo . eid . instance_id
OoOO = ii1 . translated_port
Oooo00 = ii1 . stats
ii1 = ii1 . rloc
I111iIi1 = ii1 . address
IIII1 = lisp . lisp_myrlocs [ 0 ] . address
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
o00 = '\x45\x00'
oO = len ( packet ) + 20 + 8 + 8
o00 += chr ( ( oO >> 8 ) & 0xff ) + chr ( oO & 0xff )
o00 += '\xff\xff\x40\x00\x10\x11\x00\x00'
o00 += chr ( ( IIII1 >> 24 ) & 0xff )
o00 += chr ( ( IIII1 >> 16 ) & 0xff )
o00 += chr ( ( IIII1 >> 8 ) & 0xff )
o00 += chr ( IIII1 & 0xff )
o00 += chr ( ( I111iIi1 >> 24 ) & 0xff )
o00 += chr ( ( I111iIi1 >> 16 ) & 0xff )
o00 += chr ( ( I111iIi1 >> 8 ) & 0xff )
o00 += chr ( I111iIi1 & 0xff )
o00 = lisp . lisp_ip_checksum ( o00 )
if 92 - 92: IiII * Oo0Ooo * Oo0Ooo * I1IiiI . iIii1I11I1II1
if 16 - 16: ooOoO0o % OoooooooOO - OOooOOo * Ii1I * I1ii11iIi11i / OoooooooOO
if 31 - 31: I11i . I1Ii111 * ooOoO0o + i11iIiiIii * oO0o
if 93 - 93: I1ii11iIi11i / iIii1I11I1II1 * i1IIi % OoooooooOO * O0 * I11i
Ooooooo = oO - 20
I1IIIiI1I1ii1 = '\xff\x00' if ( OoOO == 4341 ) else '\x10\xf5'
I1IIIiI1I1ii1 += chr ( ( OoOO >> 8 ) & 0xff ) + chr ( OoOO & 0xff )
I1IIIiI1I1ii1 += chr ( ( Ooooooo >> 8 ) & 0xff ) + chr ( Ooooooo & 0xff ) + '\x00\x00'
if 30 - 30: O0 * OoooooooOO
I1IIIiI1I1ii1 += '\x08\xdf\xdf\xdf'
I1IIIiI1I1ii1 += chr ( ( I111i1i1111 >> 16 ) & 0xff )
I1IIIiI1I1ii1 += chr ( ( I111i1i1111 >> 8 ) & 0xff )
I1IIIiI1I1ii1 += chr ( I111i1i1111 & 0xff )
I1IIIiI1I1ii1 += '\x00'
if 38 - 38: IiII - I1ii11iIi11i . OoOoOO00 - I1Ii111 . OoooooooOO
if 89 - 89: iIii1I11I1II1
if 21 - 21: I11i % I11i
if 27 - 27: i11iIiiIii / I1ii11iIi11i
packet = o00 + I1IIIiI1I1ii1 + packet
o00Oo0oooooo ( "Encap" , packet )
else :
oO = len ( packet )
Oooo00 = O00o0OO0000oo . stats
o00Oo0oooooo ( "Send" , packet )
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
O00o0OO0000oo . last_refresh_time = time . time ( )
Oooo00 . increment ( oO )
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
if 42 - 42: OoOoOO00 * OoOoOO00 * I1Ii111 . I11i
if 51 - 51: OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o * iIii1I11I1II1 % OoO0O00
if 99 - 99: oO0o * II111iiii * I1Ii111
ii1 = ii1 . print_address_no_iid ( )
o0oOoO00o . sendto ( packet , ( ii1 , 0 ) )
if 92 - 92: Oo0Ooo
o0O ( Ooo00O0o , "Fast" )
return ( True )
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
def IIIii ( lisp_packet , thread_name ) :
global Oo0o , O00OooOo00o , IiI11i1IIiiI
global o0oOoO00o , i1
global OOO0o0o
global iIiiI1
global oo0Ooo0
if 60 - 60: I1ii11iIi11i * I1IiiI
Ooo00O0o = o0O ( None , "RTR" )
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
if ( oo0Ooo0 ) :
if ( I1Iii1 ( lisp_packet . packet ) ) : return
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
Ooooo00o0OoO = lisp_packet
oooo0O0O0o0 = Ooooo00o0OoO . is_lisp_packet ( Ooooo00o0OoO . packet )
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if 2 - 2: OoooooooOO % OOooOOo
if ( oooo0O0O0o0 == False ) :
oOoOOo0oo0 = Ooooo00o0OoO . packet
o0O0Oo00Oo0o , OOOo , OoOO , oo0OOo0O = lisp . lisp_is_rloc_probe ( oOoOOo0oo0 , - 1 )
if ( oOoOOo0oo0 != o0O0Oo00Oo0o ) :
if ( OOOo == None ) : return
lisp . lisp_parse_packet ( Oo0o , o0O0Oo00Oo0o , OOOo , OoOO , oo0OOo0O )
return
if 39 - 39: OoooooooOO + oO0o % OOooOOo / OOooOOo
if 27 - 27: iII111i . I11i . iIii1I11I1II1 . iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
Ooooo00o0OoO . packet = lisp . lisp_reassemble ( Ooooo00o0OoO . packet )
if ( Ooooo00o0OoO . packet == None ) : return
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
if ( lisp . lisp_flow_logging ) : Ooooo00o0OoO = copy . deepcopy ( Ooooo00o0OoO )
if 68 - 68: Oo0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
if ( oooo0O0O0o0 ) :
if ( Ooooo00o0OoO . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
Ooooo00o0OoO . print_packet ( "Receive-({})" . format ( thread_name ) , True )
Ooooo00o0OoO . strip_outer_headers ( )
else :
if ( Ooooo00o0OoO . decode ( False , None , None ) == None ) : return
Ooooo00o0OoO . print_packet ( "Receive-({})" . format ( thread_name ) , False )
if 45 - 45: OoOoOO00
if 66 - 66: OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
if 74 - 74: Oo0Ooo
if 61 - 61: Oo0Ooo - I1Ii111 * II111iiii % ooOoO0o * iIii1I11I1II1 + OoO0O00
if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
if ( oooo0O0O0o0 and Ooooo00o0OoO . lisp_header . get_instance_id ( ) == 0xffffff ) :
II1I1iiIII1I1 = lisp . lisp_control_header ( )
II1I1iiIII1I1 . decode ( Ooooo00o0OoO . packet )
if ( II1I1iiIII1I1 . is_info_request ( ) ) :
o0Ooo0o0ooo0 = lisp . lisp_info ( )
o0Ooo0o0ooo0 . decode ( Ooooo00o0OoO . packet )
o0Ooo0o0ooo0 . print_info ( )
if 70 - 70: i11iIiiIii % iII111i
if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
if 76 - 76: Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
ooo = o0Ooo0o0ooo0 . hostname if ( o0Ooo0o0ooo0 . hostname != None ) else ""
OOOO0oooo = Ooooo00o0OoO . outer_source
o0o = Ooooo00o0OoO . udp_sport
if ( lisp . lisp_store_nat_info ( ooo , OOOO0oooo , o0o ) ) :
o00oo0 ( Oo0o , ooo , OOOO0oooo , o0o )
if 51 - 51: O0 - i1IIi / I1IiiI
else :
OOOo = Ooooo00o0OoO . outer_source . print_address_no_iid ( )
oo0OOo0O = Ooooo00o0OoO . outer_ttl
Ooooo00o0OoO = Ooooo00o0OoO . packet
if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 28 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 28 ] ) == False ) : oo0OOo0O = - 1
Ooooo00o0OoO = Ooooo00o0OoO [ 28 : : ]
lisp . lisp_parse_packet ( Oo0o , Ooooo00o0OoO , OOOo , 0 , oo0OOo0O )
if 37 - 37: o0oOOo0O0Ooo % ooOoO0o
return
if 83 - 83: OOooOOo . I1Ii111 + oO0o - OOooOOo * I1Ii111 / I1Ii111
if 39 - 39: I1Ii111 / Oo0Ooo % OoO0O00 % i11iIiiIii
if 90 - 90: I1Ii111 - OoooooooOO
if 96 - 96: O0 . Ii1I % OoO0O00 * iIii1I11I1II1
if 54 - 54: Ii1I * I1Ii111 - OoooooooOO % I1IiiI + O0
if 6 - 6: I1ii11iIi11i - II111iiii / oO0o + i11iIiiIii + OOooOOo
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if 79 - 79: Ii1I . OoO0O00
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if ( oooo0O0O0o0 ) :
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( Ooooo00o0OoO . packet ) )
if 52 - 52: i1IIi
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
I1i11II = False
if ( Ooooo00o0OoO . inner_dest . is_mac ( ) ) :
Ooooo00o0OoO . packet = lisp . lisp_mac_input ( Ooooo00o0OoO . packet )
if ( Ooooo00o0OoO . packet == None ) : return
Ooooo00o0OoO . encap_port = lisp . LISP_VXLAN_DATA_PORT
elif ( Ooooo00o0OoO . inner_version == 4 ) :
I1i11II , Ooooo00o0OoO . packet = lisp . lisp_ipv4_input ( Ooooo00o0OoO . packet )
if ( Ooooo00o0OoO . packet == None ) : return
Ooooo00o0OoO . inner_ttl = Ooooo00o0OoO . outer_ttl
elif ( Ooooo00o0OoO . inner_version == 6 ) :
Ooooo00o0OoO . packet = lisp . lisp_ipv6_input ( Ooooo00o0OoO )
if ( Ooooo00o0OoO . packet == None ) : return
Ooooo00o0OoO . inner_ttl = Ooooo00o0OoO . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if 31 - 31: oO0o / IiII * o0oOOo0O0Ooo . II111iiii
if 89 - 89: O0
if 2 - 2: I1ii11iIi11i . I1ii11iIi11i + I1ii11iIi11i * o0oOOo0O0Ooo
if 100 - 100: Oo0Ooo % Ii1I / I11i
if 30 - 30: Oo0Ooo - OOooOOo - iII111i
if ( Ooooo00o0OoO . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( Ooooo00o0OoO , ed = "decap" ) == False ) : return
Ooooo00o0OoO . outer_source . afi = lisp . LISP_AFI_NONE
Ooooo00o0OoO . outer_dest . afi = lisp . LISP_AFI_NONE
if 81 - 81: o0oOOo0O0Ooo . OoooooooOO + OOooOOo * ooOoO0o
if 74 - 74: i1IIi + O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
i1iIi1iI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( Ooooo00o0OoO . inner_source , None ,
Ooooo00o0OoO . outer_source )
if ( i1iIi1iI ) :
OoooO0o = Ooooo00o0OoO . packet if ( I1i11II ) else None
lisp . lisp_glean_map_cache ( Ooooo00o0OoO . inner_source , Ooooo00o0OoO . outer_source ,
Ooooo00o0OoO . udp_sport , OoooO0o )
if ( I1i11II ) : return
if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
if 17 - 17: I1ii11iIi11i . II111iiii . ooOoO0o / I1ii11iIi11i
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
if 87 - 87: oO0o % Ii1I
if 83 - 83: II111iiii - I11i
oOO00O = Ooooo00o0OoO . inner_dest
if ( oOO00O . is_multicast_address ( ) ) :
if ( oOO00O . is_link_local_multicast ( ) ) :
iiIii1IIi = lisp . green ( oOO00O . print_address ( ) , False )
lisp . dprint ( "Drop link-local multicast EID {}" . format ( iiIii1IIi ) )
return
if 10 - 10: i11iIiiIii - o0oOOo0O0Ooo % iIii1I11I1II1
i111IIIiI = False
i1I11IiI1iiII , o00oOo0oOoo , III111iiIi1 = lisp . lisp_allow_gleaning ( Ooooo00o0OoO . inner_source , oOO00O , None )
else :
i111IIIiI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( oOO00O , None , None )
if 29 - 29: OoooooooOO + Ii1I % iIii1I11I1II1 - OOooOOo . I1IiiI % Oo0Ooo
Ooooo00o0OoO . gleaned_dest = i111IIIiI
if 16 - 16: IiII / Oo0Ooo + OOooOOo / Ii1I
if 42 - 42: Oo0Ooo + II111iiii - I1IiiI / I11i % IiII
if 66 - 66: OOooOOo + i1IIi . I1IiiI + OOooOOo - I11i
if 17 - 17: O0 . I1Ii111 . O0 + O0 / Oo0Ooo . ooOoO0o
O00o0OO0000oo = lisp . lisp_map_cache_lookup ( Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest )
if ( O00o0OO0000oo ) : O00o0OO0000oo . add_recent_source ( Ooooo00o0OoO . inner_source )
if 62 - 62: I1ii11iIi11i % iII111i * OoO0O00 - i1IIi
if 66 - 66: i11iIiiIii / o0oOOo0O0Ooo - OoooooooOO / i1IIi . i11iIiiIii
if 16 - 16: Oo0Ooo % I1ii11iIi11i + I11i - O0 . iII111i / I1Ii111
if 35 - 35: oO0o / I1Ii111 / II111iiii - iIii1I11I1II1 + II111iiii . I1Ii111
if 81 - 81: iII111i * OOooOOo - I1ii11iIi11i * Ii1I % OoOoOO00 * OoOoOO00
if ( O00o0OO0000oo and ( O00o0OO0000oo . action == lisp . LISP_NATIVE_FORWARD_ACTION or
O00o0OO0000oo . eid . address == 0 ) ) :
ooOOo0O = lisp . lisp_db_for_lookups . lookup_cache ( Ooooo00o0OoO . inner_source , False )
if ( ooOOo0O and ooOOo0O . secondary_iid ) :
Ii11 = Ooooo00o0OoO . inner_dest
Ii11 . instance_id = ooOOo0O . secondary_iid
if 8 - 8: Oo0Ooo + II111iiii * OOooOOo * OoOoOO00 * I11i / IiII
O00o0OO0000oo = lisp . lisp_map_cache_lookup ( Ooooo00o0OoO . inner_source , Ii11 )
if ( O00o0OO0000oo ) :
Ooooo00o0OoO . gleaned_dest = O00o0OO0000oo . gleaned
O00o0OO0000oo . add_recent_source ( Ooooo00o0OoO . inner_source )
else :
i111IIIiI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( Ii11 , None ,
None )
Ooooo00o0OoO . gleaned_dest = i111IIIiI
if 21 - 21: oO0o / OoooooooOO
if 11 - 11: OOooOOo % Ii1I - i11iIiiIii - oO0o + ooOoO0o + IiII
if 87 - 87: I1Ii111 * i1IIi / I1ii11iIi11i
if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo - OoooooooOO % OOooOOo * OoOoOO00
if 69 - 69: i1IIi
if 59 - 59: II111iiii - o0oOOo0O0Ooo
if 24 - 24: Oo0Ooo - i1IIi + I11i
if 38 - 38: OoooooooOO / I1ii11iIi11i . O0 / i1IIi / Oo0Ooo + iIii1I11I1II1
if 96 - 96: iII111i
if ( O00o0OO0000oo == None and i111IIIiI ) :
lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( Ooooo00o0OoO . inner_dest . print_address ( ) , False ) ) )
if 18 - 18: iII111i * I11i - Ii1I
return
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if ( O00o0OO0000oo == None or lisp . lisp_mr_or_pubsub ( O00o0OO0000oo . action ) ) :
if ( lisp . lisp_rate_limit_map_request ( Ooooo00o0OoO . inner_dest ) ) : return
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
iii1III1i = ( O00o0OO0000oo and O00o0OO0000oo . action == lisp . LISP_SEND_PUBSUB_ACTION )
lisp . lisp_send_map_request ( Oo0o , Ooo ,
Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest , None , iii1III1i )
if 17 - 17: II111iiii / II111iiii
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = OOO0o0o
o0OO0Oo = "map-cache miss"
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = o0OO0Oo , lisp_socket = OOOO0oooo )
if 3 - 3: I1Ii111 - O0 % iIii1I11I1II1 / IiII . o0oOOo0O0Ooo
return
if 3 - 3: O0 % OoooooooOO / OOooOOo
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if ( O00o0OO0000oo and O00o0OO0000oo . refresh ( ) ) :
if ( lisp . lisp_rate_limit_map_request ( Ooooo00o0OoO . inner_dest ) == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( O00o0OO0000oo . print_eid_tuple ( ) , False ) ) )
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
lisp . lisp_send_map_request ( Oo0o , Ooo ,
Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest , None )
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if 19 - 19: i11iIiiIii
if 54 - 54: II111iiii . I11i
if 73 - 73: OoOoOO00 . I1IiiI
if 32 - 32: OoOoOO00 * I1IiiI % ooOoO0o * Ii1I . O0
if 48 - 48: iII111i * iII111i
if 13 - 13: Ii1I / I11i + OoOoOO00 . o0oOOo0O0Ooo % ooOoO0o
O00o0OO0000oo . last_refresh_time = time . time ( )
O00o0OO0000oo . stats . increment ( len ( Ooooo00o0OoO . packet ) )
if 48 - 48: I1IiiI / i11iIiiIii - o0oOOo0O0Ooo * oO0o / OoooooooOO
if 89 - 89: iIii1I11I1II1 / I1IiiI - II111iiii / Ii1I . i11iIiiIii . Ii1I
if 48 - 48: O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
Oo0 , OOo0Oo0OOo0 , i1i11I , iiIiI , i1oOOOOOOOoO , o00oO0oOo00 = O00o0OO0000oo . select_rloc ( Ooooo00o0OoO , None )
if 12 - 12: iII111i . IiII . OoOoOO00 / O0
if 58 - 58: o0oOOo0O0Ooo - II111iiii % oO0o + I1Ii111 . OoOoOO00 / IiII
if ( Oo0 == None and i1oOOOOOOOoO == None ) :
if ( iiIiI == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
Ooooo00o0OoO . send_packet ( o0oOoO00o , Ooooo00o0OoO . inner_dest )
if 8 - 8: I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = OOO0o0o
o0OO0Oo = "not an EID"
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = o0OO0Oo , lisp_socket = OOOO0oooo )
if 8 - 8: ooOoO0o * O0
o0O ( Ooo00O0o , "RTR" )
return
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
o0OO0Oo = "No reachable RLOCs found"
lisp . dprint ( o0OO0Oo )
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = OOO0o0o
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = o0OO0Oo , lisp_socket = OOOO0oooo )
if 34 - 34: ooOoO0o
return
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if ( Oo0 and Oo0 . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = OOO0o0o
o0OO0Oo = "drop action"
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = o0OO0Oo , lisp_socket = OOOO0oooo )
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
return
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
Ooooo00o0OoO . outer_tos = Ooooo00o0OoO . inner_tos
Ooooo00o0OoO . outer_ttl = Ooooo00o0OoO . inner_ttl
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if ( Oo0 ) :
Ooooo00o0OoO . encap_port = OOo0Oo0OOo0
if ( OOo0Oo0OOo0 == 0 ) : Ooooo00o0OoO . encap_port = lisp . LISP_DATA_PORT
Ooooo00o0OoO . outer_dest . copy_address ( Oo0 )
ooO0oo0o0 = Ooooo00o0OoO . outer_dest . afi_to_version ( )
Ooooo00o0OoO . outer_version = ooO0oo0o0
if 9 - 9: I1IiiI + I1ii11iIi11i / I1IiiI . oO0o * ooOoO0o
i1i1ii1111i1i = iIiiI1 if ( ooO0oo0o0 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 46 - 46: i1IIi
Ooooo00o0OoO . outer_source . copy_address ( i1i1ii1111i1i )
if 54 - 54: II111iiii - OoOoOO00
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = OOO0o0o
if ( lisp . lisp_trace_append ( Ooooo00o0OoO , rloc_entry = o00oO0oOo00 ,
lisp_socket = OOOO0oooo ) == False ) : return
if 73 - 73: OOooOOo
if 2 - 2: i11iIiiIii - II111iiii / oO0o % O0
if 66 - 66: Oo0Ooo
if 28 - 28: IiII - IiII . i1IIi - ooOoO0o + I1IiiI . IiII
if 54 - 54: OoOoOO00 - I1Ii111
if ( Ooooo00o0OoO . encode ( i1i11I ) == None ) : return
if ( len ( Ooooo00o0OoO . packet ) <= 1500 ) : Ooooo00o0OoO . print_packet ( "Send" , True )
if 3 - 3: I1IiiI - Oo0Ooo
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
IiIiiI11i1Ii = i1 if ooO0oo0o0 == 6 else o0oOoO00o
Ooooo00o0OoO . send_packet ( IiIiiI11i1Ii , Ooooo00o0OoO . outer_dest )
if 100 - 100: I1Ii111 . I1IiiI * I1Ii111 - I1IiiI . I11i * Ii1I
elif ( i1oOOOOOOOoO ) :
if 89 - 89: OoO0O00 + IiII * I1Ii111
if 28 - 28: OoooooooOO . oO0o % I1ii11iIi11i / i1IIi / OOooOOo
if 36 - 36: o0oOOo0O0Ooo + I11i - IiII + iIii1I11I1II1 + OoooooooOO
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
oOoOo = len ( Ooooo00o0OoO . packet )
for oO0OO in i1oOOOOOOOoO . rle_forwarding_list :
Ooooo00o0OoO . outer_dest . copy_address ( oO0OO . address )
Ooooo00o0OoO . encap_port = lisp . LISP_DATA_PORT if oO0OO . translated_port == 0 else oO0OO . translated_port
if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
ooO0oo0o0 = Ooooo00o0OoO . outer_dest . afi_to_version ( )
Ooooo00o0OoO . outer_version = ooO0oo0o0
if 62 - 62: o0oOOo0O0Ooo
i1i1ii1111i1i = iIiiI1 if ( ooO0oo0o0 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
Ooooo00o0OoO . outer_source . copy_address ( i1i1ii1111i1i )
if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = OOO0o0o
o0OO0Oo = "replicate"
if ( lisp . lisp_trace_append ( Ooooo00o0OoO , reason = o0OO0Oo , lisp_socket = OOOO0oooo ) == False ) : return
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if ( Ooooo00o0OoO . encode ( None ) == None ) : return
if 87 - 87: OoO0O00 % I1IiiI
Ooooo00o0OoO . print_packet ( "Replicate-to-L{}" . format ( oO0OO . level ) , True )
Ooooo00o0OoO . send_packet ( o0oOoO00o , Ooooo00o0OoO . outer_dest )
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
I1I1iII1i = len ( Ooooo00o0OoO . packet ) - oOoOo
Ooooo00o0OoO . packet = Ooooo00o0OoO . packet [ I1I1iII1i : : ]
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if ( lisp . lisp_flow_logging ) : Ooooo00o0OoO = copy . deepcopy ( Ooooo00o0OoO )
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
del ( Ooooo00o0OoO )
if 18 - 18: OOooOOo + I1Ii111
o0O ( Ooo00O0o , "RTR" )
return
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
def iIiIi1ii ( lisp_thread ) :
lisp . lisp_set_exception ( )
while ( True ) :
if 28 - 28: iIii1I11I1II1 + iIii1I11I1II1
if 28 - 28: oO0o
if 52 - 52: I1IiiI + iIii1I11I1II1
if 71 - 71: O0 / oO0o
Ooooo00o0OoO = lisp_thread . input_queue . get ( )
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
lisp_thread . input_stats . increment ( len ( Ooooo00o0OoO ) )
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
lisp_thread . lisp_packet . packet = Ooooo00o0OoO
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
IIIii ( lisp_thread . lisp_packet , lisp_thread . thread_name )
if 64 - 64: iIii1I11I1II1
return
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
def I11 ( thread ) :
Oo0oO00 = ( time . time ( ) % thread . number_of_pcap_threads )
return ( int ( Oo0oO00 ) == thread . thread_number )
if 8 - 8: I1IiiI % I1IiiI . OoOoOO00 % o0oOOo0O0Ooo
if 47 - 47: ooOoO0o + II111iiii % I1Ii111 . I11i % I1ii11iIi11i
if 7 - 7: O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
if 60 - 60: OoO0O00
if 81 - 81: OoOoOO00 % Ii1I
def oo0 ( parms , not_used , packet ) :
if ( I11 ( parms [ 1 ] ) == False ) : return
if 16 - 16: Ii1I * OoO0O00 / oO0o
II1iiI = parms [ 0 ]
III1Ii1i1I1 = parms [ 1 ]
O0O00OooO = III1Ii1i1I1 . number_of_worker_threads
if 40 - 40: I11i % OoooooooOO - OOooOOo + o0oOOo0O0Ooo / OOooOOo
III1Ii1i1I1 . input_stats . increment ( len ( packet ) )
if 84 - 84: O0
if 11 - 11: II111iiii / i11iIiiIii / O0
if 94 - 94: ooOoO0o * I11i - IiII . iIii1I11I1II1
if 66 - 66: ooOoO0o - OOooOOo * OoOoOO00 / oO0o * II111iiii * OoO0O00
if 91 - 91: OoooooooOO / Ii1I . I1IiiI + ooOoO0o . II111iiii
if 45 - 45: oO0o * OoOoOO00 / iIii1I11I1II1
o00ooOoO0 = 4 if II1iiI == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ o00ooOoO0 : : ]
if 15 - 15: OOooOOo * I11i / I1ii11iIi11i * o0oOOo0O0Ooo
if 94 - 94: iII111i + Ii1I % o0oOOo0O0Ooo
if 1 - 1: OoOoOO00 % I1Ii111 - OOooOOo + oO0o + O0 * o0oOOo0O0Ooo
if 97 - 97: OoOoOO00
if ( O0O00OooO ) :
OoOo = III1Ii1i1I1 . input_stats . packet_count % O0O00OooO
OoOo = OoOo + ( len ( O0ooo00OOo00 ) - O0O00OooO )
OOoO0oo0O = O0ooo00OOo00 [ OoOo ]
OOoO0oo0O . input_queue . put ( packet )
else :
III1Ii1i1I1 . lisp_packet . packet = packet
IIIii ( III1Ii1i1I1 . lisp_packet , III1Ii1i1I1 . thread_name )
if 49 - 49: o0oOOo0O0Ooo
return
if 31 - 31: OoO0O00 * i11iIiiIii * Ii1I . i11iIiiIii
if 12 - 12: OoOoOO00 % IiII % I1ii11iIi11i . i11iIiiIii * iIii1I11I1II1
if 66 - 66: i11iIiiIii * iIii1I11I1II1 % OoooooooOO
if 5 - 5: OoOoOO00 % OoooooooOO
if 60 - 60: OoOoOO00 . i1IIi % OoO0O00 % ooOoO0o % OOooOOo
if 33 - 33: iIii1I11I1II1 - Ii1I * I1ii11iIi11i % iIii1I11I1II1 + OoO0O00 . OOooOOo
if 56 - 56: i11iIiiIii * iII111i . oO0o
if 78 - 78: OoOoOO00
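# pcap capture thread: opens a live capture ("lo0" on macOS, otherwise "any"),
# builds a BPF filter for LISP data traffic (UDP 4341/8472/4789), RLOC-probe
# traffic on UDP 4342 and, when "lisp-nat = yes" is configured, other traffic
# not sourced from this host, then loops dispatching packets to the callback
# above.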
def iI1 ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
II1iiI = "lo0" if lisp . lisp_is_macos ( ) else "any"
iI1IiiiIiI1Ii = pcappy . open_live ( II1iiI , 9000 , 0 , 100 )
if 78 - 78: OoooooooOO / OOooOOo % OoOoOO00 * OoooooooOO
if 68 - 68: oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
oOO0 = getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
oOO0 = ( oOO0 != "" and oOO0 [ 0 ] == " " )
if 15 - 15: Oo0Ooo + I11i . ooOoO0o - iIii1I11I1II1 / O0 % iIii1I11I1II1
oO0O = "(dst host "
Oo00o0O0O = ""
for oO00OOoO00 in lisp . lisp_get_all_addresses ( ) :
oO0O += "{} or " . format ( oO00OOoO00 )
Oo00o0O0O += "{} or " . format ( oO00OOoO00 )
if 84 - 84: I11i % i1IIi
oO0O = oO0O [ 0 : - 4 ]
oO0O += ") and ((udp dst port 4341 or 8472 or 4789) or "
oO0O += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
if 33 - 33: I1ii11iIi11i * I1ii11iIi11i . ooOoO0o . i11iIiiIii
if 48 - 48: o0oOOo0O0Ooo . Ii1I + OoOoOO00 % I1ii11iIi11i / i11iIiiIii
if 74 - 74: II111iiii . O0 - I1IiiI + IiII % i11iIiiIii % OoOoOO00
if 78 - 78: Ii1I + OoOoOO00 + IiII - IiII . i11iIiiIii / OoO0O00
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
if 37 - 37: OoooooooOO + O0 - i1IIi % ooOoO0o
Oo00o0O0O = Oo00o0O0O [ 0 : - 4 ]
oO0O += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( Oo00o0O0O )
if 24 - 24: OoOoOO00
if 94 - 94: i1IIi * i1IIi % II111iiii + OOooOOo
if 28 - 28: I1IiiI
if ( oOO0 ) :
oO0O += ( " or (dst net 0.0.0.0/0 and " + "not (host {} or src net 127.0.0.0/8))" ) . format ( Oo00o0O0O )
if 49 - 49: I11i . o0oOOo0O0Ooo % oO0o / Ii1I
if 95 - 95: O0 * OoOoOO00 * IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
lisp . lprint ( "Capturing packets for: '{}'" . format ( oO0O ) )
iI1IiiiIiI1Ii . filter = oO0O
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
iI1IiiiIiI1Ii . loop ( - 1 , oo0 , [ II1iiI , lisp_thread ] )
return
if 46 - 46: I1Ii111
if 72 - 72: iII111i * OOooOOo
if 67 - 67: i1IIi
if 5 - 5: II111iiii . OoooooooOO
if 57 - 57: I1IiiI
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
if 50 - 50: OoOoOO00
if 33 - 33: I11i
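# Data-encapsulates the supplied IGMP query to one gleaned EID: looks the EID
# up in the map-cache, takes the RLOC and translated port from the matching
# RLE node, builds the outer/inner headers (inner destination 224.0.0.1,
# TTL 1), encodes the packet and sends it on the raw socket.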
def oOo00OoO0O ( lisp_raw_socket , eid , geid , igmp ) :
if 69 - 69: iIii1I11I1II1 * I1IiiI - iII111i + O0 + O0
if 65 - 65: I1Ii111 / i11iIiiIii / OoO0O00 - OOooOOo
if 9 - 9: I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
if 86 - 86: II111iiii + ooOoO0o + IiII
Ooooo00o0OoO = lisp . lisp_packet ( igmp )
if 9 - 9: ooOoO0o + II111iiii % ooOoO0o % IiII + iIii1I11I1II1
if 59 - 59: i1IIi
if 48 - 48: O0 * Ii1I * OoO0O00 . OoO0O00 * I11i - Ii1I
if 14 - 14: I1ii11iIi11i + i11iIiiIii
O00o0OO0000oo = lisp . lisp_map_cache_lookup ( eid , geid )
if ( O00o0OO0000oo == None ) : return
if ( O00o0OO0000oo . rloc_set == [ ] ) : return
if ( O00o0OO0000oo . rloc_set [ 0 ] . rle == None ) : return
if 83 - 83: I1ii11iIi11i / i11iIiiIii + II111iiii . iII111i * OOooOOo + IiII
iiii1i1II1 = eid . print_address_no_iid ( )
for oO0oOo0 in O00o0OO0000oo . rloc_set [ 0 ] . rle . rle_nodes :
if ( oO0oOo0 . rloc_name == iiii1i1II1 ) :
Ooooo00o0OoO . outer_dest . copy_address ( oO0oOo0 . address )
Ooooo00o0OoO . encap_port = oO0oOo0 . translated_port
break
if 63 - 63: iIii1I11I1II1 % I1ii11iIi11i - iII111i
if 17 - 17: I1IiiI
if ( Ooooo00o0OoO . outer_dest . is_null ( ) ) : return
if 88 - 88: OoooooooOO
Ooooo00o0OoO . outer_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
Ooooo00o0OoO . outer_version = Ooooo00o0OoO . outer_dest . afi_to_version ( )
Ooooo00o0OoO . outer_ttl = 32
Ooooo00o0OoO . inner_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
Ooooo00o0OoO . inner_dest . store_address ( "[{}]224.0.0.1" . format ( geid . instance_id ) )
Ooooo00o0OoO . inner_ttl = 1
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
o000O0o = lisp . green ( eid . print_address ( ) , False )
o0OO0Oo = lisp . red ( "{}:{}" . format ( Ooooo00o0OoO . outer_dest . print_address_no_iid ( ) ,
Ooooo00o0OoO . encap_port ) , False )
Oo0O = lisp . bold ( "IGMP Query" , False )
if 88 - 88: I1IiiI % OOooOOo % I1ii11iIi11i . i11iIiiIii % o0oOOo0O0Ooo
lisp . lprint ( "Data encapsulate {} to gleaned EID {}, RLOC {}" . format ( Oo0O , o000O0o , o0OO0Oo ) )
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
if 48 - 48: iII111i + IiII
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if ( Ooooo00o0OoO . encode ( None ) == None ) : return
Ooooo00o0OoO . print_packet ( "Send" , True )
if 79 - 79: Ii1I
Ooooo00o0OoO . send_packet ( lisp_raw_socket , Ooooo00o0OoO . outer_dest )
if 76 - 76: iIii1I11I1II1
if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
if 93 - 93: OoooooooOO * Oo0Ooo
if 10 - 10: I1Ii111 * OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i11iIiiIii
if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
if 98 - 98: i1IIi
if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
if 12 - 12: iIii1I11I1II1 . Ii1I . I1ii11iIi11i % I1IiiI . II111iiii . oO0o
if 32 - 32: I1ii11iIi11i + IiII / O0 / OoOoOO00 * OoooooooOO % ooOoO0o
if 50 - 50: OoO0O00
if 66 - 66: iIii1I11I1II1
if 41 - 41: I1Ii111 . O0 * I1IiiI * I1ii11iIi11i
if 100 - 100: iII111i
if 73 - 73: I1ii11iIi11i % II111iiii
if 79 - 79: OoOoOO00 + OoO0O00 - II111iiii + Ii1I
if 11 - 11: oO0o + iIii1I11I1II1
if 10 - 10: O0
if 68 - 68: OOooOOo + oO0o . O0 . Ii1I % i1IIi % OOooOOo
if 50 - 50: IiII + o0oOOo0O0Ooo
if 96 - 96: OoO0O00
if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
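# Sends a general IGMP membership query to every gleaned (EID, group) pair
# whose glean policy allows IGMP: builds a 24-byte IPv4 header (router-alert
# option, destination 224.0.0.1) plus a 12-byte IGMPv3-style query, computes
# both checksums, and hands each copy to the encapsulation routine above.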
def OooOooO0O0o0 ( lisp_raw_socket ) :
if ( lisp . lisp_gleaned_groups == { } ) : return
if 59 - 59: Oo0Ooo + iII111i - OOooOOo . o0oOOo0O0Ooo + I1IiiI % oO0o
if 37 - 37: iII111i + iII111i % o0oOOo0O0Ooo
if 29 - 29: ooOoO0o
if 41 - 41: O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
oOOoOOO0oo0 = "\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
O00O = lisp . lisp_myrlocs [ 0 ]
Oo0OoO00oOO0o = O00O . address
oOOoOOO0oo0 += chr ( ( Oo0OoO00oOO0o >> 24 ) & 0xff )
oOOoOOO0oo0 += chr ( ( Oo0OoO00oOO0o >> 16 ) & 0xff )
oOOoOOO0oo0 += chr ( ( Oo0OoO00oOO0o >> 8 ) & 0xff )
oOOoOOO0oo0 += chr ( Oo0OoO00oOO0o & 0xff )
oOOoOOO0oo0 += "\xe0\x00\x00\x01"
oOOoOOO0oo0 += "\x94\x04\x00\x00"
oOOoOOO0oo0 = lisp . lisp_ip_checksum ( oOOoOOO0oo0 , 24 )
if 94 - 94: Ii1I - I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo
if 15 - 15: OOooOOo
if 31 - 31: iII111i / i1IIi . OoO0O00
if 83 - 83: oO0o / iIii1I11I1II1 + i1IIi / iII111i
if 47 - 47: oO0o + OoooooooOO . II111iiii . iII111i
I1i11II = "\x11\x64\x00\x00" + "\x00\x00\x00\x00" + "\x02\x3c\x00\x00"
I1i11II = lisp . lisp_igmp_checksum ( I1i11II )
if 66 - 66: ooOoO0o * OoOoOO00
if 2 - 2: oO0o . I1Ii111 * Oo0Ooo + O0 - I11i * iIii1I11I1II1
if 12 - 12: o0oOOo0O0Ooo * I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
if 81 - 81: Oo0Ooo - I11i
if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
o0oOO000oO0oo = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
o0oOO000oO0oo . store_address ( Ii1iIiII1ii1 )
for OO00o0O0O000o in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
i1OOO . store_address ( OO00o0O0O000o )
i1I11IiI1iiII , o00oOo0oOoo , oO0o00O0O0oo0 = lisp . lisp_allow_gleaning ( o0oOO000oO0oo , i1OOO , None )
if ( oO0o00O0O0oo0 == False ) : continue
oOo00OoO0O ( lisp_raw_socket , o0oOO000oO0oo , i1OOO , oOOoOOO0oo0 + I1i11II )
if 24 - 24: I1Ii111 * oO0o
if 88 - 88: i11iIiiIii + iII111i * OoOoOO00 * iII111i + I11i
if 88 - 88: OOooOOo % Oo0Ooo - iII111i - OoOoOO00 % i11iIiiIii
if 6 - 6: Ii1I - OoO0O00 . I1IiiI - O0
if 16 - 16: iII111i * iII111i % Ii1I % I1IiiI
if 48 - 48: OOooOOo / Ii1I % OoO0O00 / IiII / I1Ii111
if 89 - 89: I1Ii111 * oO0o
if 63 - 63: OoooooooOO * OoooooooOO % OoO0O00 + O0 / I1Ii111 + iIii1I11I1II1
if 72 - 72: OoOoOO00 * iIii1I11I1II1 % I11i
if 20 - 20: II111iiii % iIii1I11I1II1 + oO0o * II111iiii * OoO0O00 % OoO0O00
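# Expires gleaned multicast state: any (EID, group) pair whose last IGMP
# report is older than LISP_IGMP_TIMEOUT_INTERVAL is removed from
# lisp_gleaned_groups.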
def iI1I1 ( ) :
o0oOO000oO0oo = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 46 - 46: iIii1I11I1II1
I111iiiii1 = [ ]
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
for OO00o0O0O000o in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
OO0ooOoOO0OOo = lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] [ OO00o0O0O000o ]
OooOoooo0000 = time . time ( ) - OO0ooOoOO0OOo
if ( OooOoooo0000 < lisp . LISP_IGMP_TIMEOUT_INTERVAL ) : continue
I111iiiii1 . append ( [ Ii1iIiII1ii1 , OO00o0O0O000o ] )
if 29 - 29: Ii1I - I1IiiI / I1IiiI * Ii1I * IiII . OOooOOo
if 80 - 80: iIii1I11I1II1
if 23 - 23: II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
if 49 - 49: IiII * O0 . IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
o0OO00oo0O = lisp . bold ( "timed out" , False )
for Ii1iIiII1ii1 , OO00o0O0O000o in I111iiiii1 :
o0oOO000oO0oo . store_address ( Ii1iIiII1ii1 )
i1OOO . store_address ( OO00o0O0O000o )
o000O0o = lisp . green ( Ii1iIiII1ii1 , False )
Ii1I1i111 = lisp . green ( OO00o0O0O000o , False )
lisp . lprint ( "{} RLE {} for gleaned group {}" . format ( o000O0o , o0OO00oo0O , Ii1I1i111 ) )
lisp . lisp_remove_gleaned_multicast ( o0oOO000oO0oo , i1OOO )
if 57 - 57: oO0o . I1IiiI
if 6 - 6: ooOoO0o
if 39 - 39: ooOoO0o / O0 * IiII
if 17 - 17: Ii1I / iIii1I11I1II1 - OoO0O00 + I1IiiI % OOooOOo
if 14 - 14: o0oOOo0O0Ooo % IiII + I1ii11iIi11i + OoO0O00
if 76 - 76: OoO0O00 - i11iIiiIii + OoOoOO00 + OOooOOo / OoooooooOO
if 50 - 50: II111iiii - I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1
if 91 - 91: II111iiii - O0 . iIii1I11I1II1 . O0 + I1ii11iIi11i - II111iiii
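# Periodic maintenance, re-armed every 60 seconds: frees the crypto keys
# cached by nonce, times out map-cache entries, clears the RTR NAT-trace
# cache, expires idle gleaned groups and sends IGMP queries to the rest.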
def iiIiiIi1 ( lisp_raw_socket ) :
lisp . lisp_set_exception ( )
if 30 - 30: OOooOOo + II111iiii - IiII * OoooooooOO
if 19 - 19: IiII - o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00 / OOooOOo
if 87 - 87: OoOoOO00 - ooOoO0o - OOooOOo + Oo0Ooo % iIii1I11I1II1 / i11iIiiIii
if 12 - 12: ooOoO0o
for oOo0oO in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for oOOO0ooOO in oOo0oO : del ( oOOO0ooOO )
if 3 - 3: OoooooooOO
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
if 74 - 74: oO0o
if 34 - 34: iII111i
if 44 - 44: i1IIi % I1IiiI % o0oOOo0O0Ooo
iI1I1 ( )
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
OooOooO0O0o0 ( lisp_raw_socket )
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
oOOoo00O0O = threading . Timer ( 60 , iiIiiIi1 ,
[ lisp_raw_socket ] )
oOOoo00O0O . start ( )
return
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
if 89 - 89: Oo0Ooo + I1ii11iIi11i - o0oOOo0O0Ooo
if 40 - 40: OoO0O00 + OoO0O00
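# RTR startup: selects the local RLOC (preferring the eth0/ens5 address when
# running on AWS), opens the control, IPC, trace and raw send sockets, spawns
# the pcap and worker threads, loads the map-cache checkpoint and starts the
# 60-second maintenance timer. Returns False if no local addresses can be
# determined.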
def o0oo0o00ooO00 ( ) :
global Ii1iI , Oo0o , I1Ii11I1Ii1i
global o0oOoO00o , i1 , O0ooo00OOo00
global Oo , OOO0o0o
global iIiiI1
if 37 - 37: OoO0O00 - I1ii11iIi11i . OoooooooOO . ooOoO0o + OoOoOO00 / Ii1I
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if 15 - 15: IiII . i1IIi * OoOoOO00 % iIii1I11I1II1
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if 24 - 24: ooOoO0o - I11i * oO0o
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
if 79 - 79: IiII % OoO0O00
if 81 - 81: i11iIiiIii + i11iIiiIii * OoO0O00 + IiII
if 32 - 32: O0 . OoooooooOO
if 15 - 15: I1IiiI . OoO0O00
if 17 - 17: i11iIiiIii / Oo0Ooo . OoO0O00 / I1IiiI
if 38 - 38: i1IIi . I1ii11iIi11i % Ii1I + iIii1I11I1II1 + O0
iIiiI1 = lisp . lisp_myrlocs [ 0 ]
if ( lisp . lisp_on_aws ( ) ) :
iIi1i11 = lisp . bold ( "AWS RTR" , False )
Oo0OoO00oOO0o = None
for II1iiI in [ "eth0" , "ens5" ] :
Oo0OoO00oOO0o = lisp . lisp_get_interface_address ( II1iiI )
if ( Oo0OoO00oOO0o != None ) : break
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
if ( Oo0OoO00oOO0o != None ) :
iIiiI1 = Oo0OoO00oOO0o
oO00OOoO00 = Oo0OoO00oOO0o . print_address_no_iid ( )
lisp . lprint ( "{} using RLOC {} on {}" . format ( iIi1i11 , oO00OOoO00 , II1iiI ) )
else :
oO00OOoO00 = iIiiI1 . print_address_no_iid ( )
lisp . lprint ( "{} cannot obtain RLOC, using {}" . format ( iIi1i11 , oO00OOoO00 ) )
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
Iiii1Ii = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( Iiii1Ii ,
str ( Ooo ) )
Ii1iI = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
Oo = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
if 62 - 62: i1IIi % OoOoOO00
Oo0o [ 0 ] = I1Ii11I1Ii1i
if 37 - 37: I11i * i1IIi
Oo0o [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
Oo0o [ 2 ] = Ii1iI
if 20 - 20: IiII + OoOoOO00 - OOooOOo - OOooOOo - I1ii11iIi11i
if 7 - 7: O0
if 26 - 26: o0oOOo0O0Ooo / OoooooooOO % ooOoO0o % OOooOOo
if 54 - 54: OoOoOO00 - I1Ii111
if 65 - 65: I1Ii111 . ooOoO0o + OOooOOo / Oo0Ooo + IiII % i1IIi
if 28 - 28: i11iIiiIii + O0 / I1ii11iIi11i
if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
o0oOoO00o = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
o0oOoO00o . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
Oo0o . append ( o0oOoO00o )
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
if 64 - 64: I11i + OoO0O00
if 25 - 25: I1IiiI . ooOoO0o + I1IiiI % Ii1I * iIii1I11I1II1
OOO0o0o = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( lisp . LISP_TRACE_PORT ) )
if 31 - 31: i11iIiiIii + OOooOOo - O0
if ( lisp . lisp_is_raspbian ( ) == False ) :
i1 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 51 - 51: OoO0O00 * i1IIi / Ii1I * OOooOOo + ooOoO0o % I1ii11iIi11i
if 34 - 34: oO0o * OoooooooOO + Ii1I + i11iIiiIii
iiIi = os . getenv ( "LISP_PCAP_THREADS" )
iiIi = 1 if ( iiIi == None ) else int ( iiIi )
O0oo0 = os . getenv ( "LISP_WORKER_THREADS" )
O0oo0 = 0 if ( O0oo0 == None ) else int ( O0oo0 )
if 37 - 37: i11iIiiIii
if 12 - 12: I1ii11iIi11i / Ii1I
if 5 - 5: OoooooooOO
if 18 - 18: I1IiiI % OoooooooOO - iII111i . i11iIiiIii * Oo0Ooo % Ii1I
for Ii1I1 in range ( iiIi ) :
O0oo00oOOO0o = lisp . lisp_thread ( "pcap-{}" . format ( Ii1I1 ) )
O0oo00oOOO0o . thread_number = Ii1I1
O0oo00oOOO0o . number_of_pcap_threads = iiIi
O0oo00oOOO0o . number_of_worker_threads = O0oo0
O0ooo00OOo00 . append ( O0oo00oOOO0o )
threading . Thread ( target = iI1 , args = [ O0oo00oOOO0o ] ) . start ( )
if 5 - 5: o0oOOo0O0Ooo / I1IiiI % Ii1I . IiII
if 86 - 86: i1IIi * OoOoOO00 . O0 - Ii1I - o0oOOo0O0Ooo - OoOoOO00
if 47 - 47: OOooOOo + I11i
if 50 - 50: I1Ii111 + I1ii11iIi11i
if 4 - 4: IiII / Oo0Ooo
if 31 - 31: I1Ii111 - I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - oO0o
for Ii1I1 in range ( O0oo0 ) :
O0oo00oOOO0o = lisp . lisp_thread ( "worker-{}" . format ( Ii1I1 ) )
O0ooo00OOo00 . append ( O0oo00oOOO0o )
threading . Thread ( target = iIiIi1ii , args = [ O0oo00oOOO0o ] ) . start ( )
if 43 - 43: iII111i + Oo0Ooo / OoooooooOO
if 24 - 24: O0 + o0oOOo0O0Ooo * Ii1I - I1Ii111
if 10 - 10: i11iIiiIii
if 21 - 21: I1IiiI / iII111i
if 69 - 69: ooOoO0o % ooOoO0o
lisp . lisp_load_checkpoint ( )
if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
if 79 - 79: OoOoOO00 % I1IiiI % Ii1I / i1IIi % OoO0O00
if 56 - 56: iIii1I11I1II1 - i11iIiiIii * iII111i
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 84 - 84: OOooOOo + Ii1I + o0oOOo0O0Ooo
if 33 - 33: Ii1I
if 93 - 93: ooOoO0o
if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
oOOoo00O0O = threading . Timer ( 60 , iiIiiIi1 ,
[ o0oOoO00o ] )
oOOoo00O0O . start ( )
return ( True )
if 19 - 19: I1ii11iIi11i
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
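# RTR shutdown: closes every socket opened at startup.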
def iI11II1i1I1 ( ) :
if 72 - 72: iII111i - OoooooooOO
if 25 - 25: OoOoOO00 % OoooooooOO * Oo0Ooo - i1IIi * II111iiii * oO0o
if 30 - 30: I11i % OoOoOO00 / I1ii11iIi11i * O0 * Ii1I . I1IiiI
if 46 - 46: OoOoOO00 - O0
lisp . lisp_close_socket ( Oo0o [ 0 ] , "" )
lisp . lisp_close_socket ( Oo0o [ 1 ] , "" )
lisp . lisp_close_socket ( Ii1iI , "lisp-rtr" )
lisp . lisp_close_socket ( I1Ii11I1Ii1i , "" )
lisp . lisp_close_socket ( OOO0o0o , "" )
lisp . lisp_close_socket ( Oo , "lispers.net-itr" )
o0oOoO00o . close ( )
return
if 70 - 70: I11i + Oo0Ooo * iIii1I11I1II1 . I1IiiI * I11i
if 49 - 49: o0oOOo0O0Ooo
if 25 - 25: iII111i . OoooooooOO * iIii1I11I1II1 . o0oOOo0O0Ooo / O0 + Ii1I
if 68 - 68: Oo0Ooo
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
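# "lisp map-resolver" command handler: records the map-resolver and, if no
# test is already running, schedules a one-shot timer to probe it with
# lisp_test_mr().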
def I1iiIiiIiiI ( kv_pair ) :
global Oo0o
global Ooo
if 94 - 94: i1IIi
lispconfig . lisp_map_resolver_command ( kv_pair )
if 36 - 36: I1IiiI + Oo0Ooo
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ Oo0o , Ooo ] )
lisp . lisp_test_mr_timer . start ( )
if 46 - 46: iII111i
return
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if 65 - 65: ooOoO0o - i1IIi
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
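# "lisp xtr-parameters" command handler: applies the generic xTR settings; if
# RLOC-probing was just enabled, starts the probe timer and tells the data
# plane which crypto port to use, then pushes the logging flags down to the
# external data plane.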
def ooO0 ( kv_pair ) :
global I1Ii11I1Ii1i , o0oOoO00o , Ooo
if 94 - 94: I11i . I1IiiI
oooO = lisp . lisp_rloc_probing
if 64 - 64: O0 % ooOoO0o
if 40 - 40: o0oOOo0O0Ooo + I11i
if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
if 26 - 26: Oo0Ooo + O0 - iIii1I11I1II1
lispconfig . lisp_xtr_command ( kv_pair )
if 47 - 47: OoooooooOO
if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
if ( oooO == False and lisp . lisp_rloc_probing ) :
i1I = [ I1Ii11I1Ii1i , I1Ii11I1Ii1i ,
None , o0oOoO00o ]
lisp . lisp_start_rloc_probe_timer ( 1 , i1I )
oO0Oo = { "type" : "itr-crypto-port" , "port" : Ooo }
lisp . lisp_write_to_dp_socket ( oO0Oo )
if 2 - 2: I1Ii111 - I1ii11iIi11i + o0oOOo0O0Ooo * OoO0O00 / iII111i
if 26 - 26: OOooOOo * Oo0Ooo
if 31 - 31: I11i * oO0o . Ii1I
if 35 - 35: I11i
if 94 - 94: ooOoO0o / i11iIiiIii % O0
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 70 - 70: I11i - Oo0Ooo / OoooooooOO % OoooooooOO
if 95 - 95: OoooooooOO % OoooooooOO . Ii1I
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
if 76 - 76: I1ii11iIi11i
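# Command dispatch table: maps each lispers.net CLI clause to its handler
# function and the keywords (with allowed value ranges) that clause accepts.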
ooO000OO = {
"lisp xtr-parameters" : [ ooO0 , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ I1iiIiiIiiI , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"subscribe-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ i11 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp glean-mapping" : [ O0O0O , {
"instance-id" : [ False ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc-prefix" : [ True ] ,
"rloc-probe" : [ True , "yes" , "no" ] ,
"igmp-query" : [ True , "yes" , "no" ] } ] ,
"show rtr-rloc-probing" : [ iiI1I11i1i , { } ] ,
"show rtr-keys" : [ o00oOO0 , { } ] ,
"show rtr-map-cache" : [ O00oooo0O , { } ] ,
"show rtr-map-cache-dns" : [ Ii1IOo0o0 , { } ]
}
if 43 - 43: ooOoO0o * I1Ii111 % OOooOOo
if 38 - 38: Oo0Ooo
if 34 - 34: OoOoOO00
if 70 - 70: iIii1I11I1II1 * IiII - OOooOOo / Oo0Ooo % oO0o
if 66 - 66: OoooooooOO + ooOoO0o * iII111i
if 2 - 2: iII111i . OoO0O00 / oO0o
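# Receives a packet on the LISP-Trace port, decodes it as a trace message,
# and caches the sender's translated address/port so trace replies can be
# returned through the NAT.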
def II ( lisp_socket ) :
if 94 - 94: IiII * I11i * OoooooooOO / o0oOOo0O0Ooo . IiII - o0oOOo0O0Ooo
if 13 - 13: OOooOOo / IiII - OoO0O00 / OOooOOo . i1IIi
if 22 - 22: O0 - I11i + I1Ii111 . Ii1I * i1IIi
if 26 - 26: iIii1I11I1II1 * o0oOOo0O0Ooo . I11i
I11III11III1 , OOOo , OoOO , Ooooo00o0OoO = lisp . lisp_receive ( lisp_socket , False )
oooOoO00OooO0 = lisp . lisp_trace ( )
if ( oooOoO00OooO0 . decode ( Ooooo00o0OoO ) == False ) : return
if 98 - 98: OOooOOo + Ii1I
if 52 - 52: Oo0Ooo / OoOoOO00 - I1Ii111 . iII111i
if 50 - 50: iIii1I11I1II1 - iII111i - I11i
if 60 - 60: iIii1I11I1II1 * ooOoO0o
if 71 - 71: OoOoOO00 % Oo0Ooo % ooOoO0o
oooOoO00OooO0 . rtr_cache_nat_trace ( OOOo , OoOO )
if 34 - 34: I11i / I11i % IiII . OoOoOO00 / Oo0Ooo
if 99 - 99: ooOoO0o * I1IiiI - ooOoO0o % Ii1I
if 40 - 40: OOooOOo / IiII / iIii1I11I1II1 + Ii1I
if 59 - 59: I11i * OoooooooOO + OOooOOo . iIii1I11I1II1 / i1IIi
if 75 - 75: I11i . OOooOOo - iIii1I11I1II1 * OoO0O00 * iII111i
if 93 - 93: ooOoO0o
if 18 - 18: ooOoO0o
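# Main entry: start the RTR, then select() over the control sockets, handling
# data-plane punts, LISP-Trace packets, control messages on UDP 4342 (RLOC
# probes are ignored here because they arrive via pcap) and IPC commands/API
# requests, until a socket closes; then shut down cleanly.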
if ( o0oo0o00ooO00 ( ) == False ) :
lisp . lprint ( "lisp_rtr_startup() failed" )
lisp . lisp_print_banner ( "RTR abnormal exit" )
exit ( 1 )
if 66 - 66: oO0o * i11iIiiIii + OoOoOO00 / OOooOOo
if 96 - 96: OOooOOo + OOooOOo % IiII % OOooOOo
IiiI1I = [ I1Ii11I1Ii1i , Ii1iI ,
Oo , OOO0o0o ]
Ii11iIII = [ I1Ii11I1Ii1i ] * 3
if 58 - 58: O0
while ( True ) :
try : O0oO , OOO00o0 , i1I11IiI1iiII = select . select ( IiiI1I , [ ] , [ ] )
except : break
if 97 - 97: I1ii11iIi11i / I1ii11iIi11i / iIii1I11I1II1 % i1IIi . I1ii11iIi11i . IiII
if 4 - 4: Oo0Ooo - OoO0O00 - i11iIiiIii * I1Ii111 / Ii1I - OOooOOo
if 45 - 45: o0oOOo0O0Ooo % Oo0Ooo * i1IIi - O0
if 82 - 82: II111iiii / iII111i
if ( lisp . lisp_ipc_data_plane and Oo in O0oO ) :
lisp . lisp_process_punt ( Oo , Oo0o ,
Ooo )
if 96 - 96: Oo0Ooo / oO0o . II111iiii . Oo0Ooo
if 91 - 91: II111iiii . OOooOOo + o0oOOo0O0Ooo
if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
if 100 - 100: oO0o . iIii1I11I1II1 . iIii1I11I1II1
if 55 - 55: oO0o
if ( OOO0o0o in O0oO ) :
II ( OOO0o0o )
if 37 - 37: IiII / i11iIiiIii / Oo0Ooo
if 97 - 97: I1Ii111 . I11i / I1IiiI
if 83 - 83: I11i - I1ii11iIi11i * oO0o
if 90 - 90: Oo0Ooo * I1IiiI
if 75 - 75: I1ii11iIi11i - OoOoOO00 * i11iIiiIii . OoooooooOO - Oo0Ooo . I11i
if ( I1Ii11I1Ii1i in O0oO ) :
I11III11III1 , OOOo , OoOO , Ooooo00o0OoO = lisp . lisp_receive ( Ii11iIII [ 0 ] ,
False )
if ( OOOo == "" ) : break
if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if 6 - 6: I11i * oO0o / OoooooooOO % Ii1I * o0oOOo0O0Ooo
if ( lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
if 28 - 28: IiII * I1IiiI % IiII
lisp . lisp_parse_packet ( Ii11iIII , Ooooo00o0OoO , OOOo , OoOO )
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
if ( Ii1iI in O0oO ) :
I11III11III1 , OOOo , OoOO , Ooooo00o0OoO = lisp . lisp_receive ( Ii1iI , True )
if 38 - 38: I1Ii111
if ( OOOo == "" ) : break
if 25 - 25: iIii1I11I1II1 % II111iiii / I11i / I1ii11iIi11i
if ( I11III11III1 == "command" ) :
if ( Ooooo00o0OoO == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 22 - 22: oO0o * iII111i
if ( Ooooo00o0OoO . find ( "clear%" ) != - 1 ) :
lispconfig . lisp_clear_decap_stats ( Ooooo00o0OoO )
continue
if 4 - 4: OoOoOO00 - oO0o + I1IiiI
lispconfig . lisp_process_command ( Ii1iI , I11III11III1 ,
Ooooo00o0OoO , "lisp-rtr" , [ ooO000OO ] )
elif ( I11III11III1 == "api" ) :
lisp . lisp_process_api ( "lisp-rtr" , Ii1iI , Ooooo00o0OoO )
elif ( I11III11III1 == "data-packet" ) :
IIIii ( Ooooo00o0OoO , "" )
else :
if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if 36 - 36: IiII
if ( lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
if 19 - 19: OoOoOO00 . o0oOOo0O0Ooo . OoooooooOO
lisp . lisp_parse_packet ( Oo0o , Ooooo00o0OoO , OOOo , OoOO )
if 13 - 13: OOooOOo . Oo0Ooo / II111iiii
if 43 - 43: iIii1I11I1II1 % OoO0O00
if 84 - 84: Oo0Ooo
if 44 - 44: OoooooooOO * i11iIiiIii / Oo0Ooo
iI11II1i1I1 ( )
lisp . lisp_print_banner ( "RTR normal exit" )
exit ( 0 )
if 75 - 75: OoooooooOO . OOooOOo + OoO0O00 / Ii1I - I1IiiI % Ii1I
if 89 - 89: iII111i * iIii1I11I1II1 + i11iIiiIii . OoooooooOO
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
ttsx.py
|
from threading import Thread
import pyttsx3
def do_ttsx(s, voice, non_blocking=True):
def _do_say():
engine = pyttsx3.init()
engine.setProperty('voice', voice)
engine.say(s)
engine.runAndWait()
if non_blocking:
t = Thread(target=_do_say)
t.daemon = True
t.start()
else:
_do_say()
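# Usage sketch (illustrative only; assumes pyttsx3 is installed and that a
# valid voice id is taken from pyttsx3.init().getProperty('voices')):
#
#   import pyttsx3
#   voices = pyttsx3.init().getProperty('voices')
#   do_ttsx("Hello there", voices[0].id)         # speaks on a daemon thread
#   do_ttsx("Hello there", voices[0].id, False)  # blocks until finished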
|
provisioning.py
|
#!/usr/bin/env python3
import fileinput
import inspect
import os
import re
import subprocess
import time
from datetime import datetime
from multiprocessing import Process
from subprocess import PIPE
env = dict(os.environ)
env['PATH'] = f"{env['PATH']}:/usr/local/bin"
env['BRANCH'] = 'master' if not env.get('BRANCH') else env['BRANCH']
def _print_line_number(number_of_outer_frame=1):
cf = inspect.currentframe()
frame = cf
for ii in range(number_of_outer_frame):
frame = frame.f_back
timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
print('\n'.join(['#' * 40, '[%s] LINE NUMBER: %d' % (timestamp, frame.f_lineno), '#' * 40]))
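# Runs a command in a child Process (so a failure cannot take down the
# provisioner outright), optionally appending its stdout to file_path_name,
# and raises if the command exits non-zero.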
def _run(cmd, file_path_name=None, cwd=None):
def _f():
if not file_path_name:
_p = subprocess.Popen(cmd, cwd=cwd, env=env)
_p.communicate()
if _p.returncode != 0:
                raise Exception('command failed: %s' % ' '.join(cmd))
else:
with open(file_path_name, 'a') as f:
_p = subprocess.Popen(cmd, stdout=f, cwd=cwd, env=env)
_p.communicate()
if _p.returncode != 0:
                    raise Exception('command failed: %s' % ' '.join(cmd))
_print_line_number(2)
cmd_string = ' '.join(cmd)
timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
print('\n'.join(['#' * 40, '[%s] COMMAND: %s' % (timestamp, cmd_string), '#' * 40]))
pp = Process(target=_f)
pp.start()
pp.join()
if pp.exitcode != 0:
        raise Exception('command failed (exit code %s): %s' % (pp.exitcode, cmd_string))
def _file_line_replace(file_path_name, str_old, str_new, backup='.bak'):
with fileinput.FileInput(file_path_name, inplace=True, backup=backup) as f:
for line in f:
new_line = re.sub(str_old, str_new, line)
print(new_line, end='')
def _settings_file_line_replace(settings_file_path_name, key, value, backup='.bak'):
with fileinput.FileInput(settings_file_path_name, inplace=True, backup=backup) as f:
for line in f:
new_line = re.sub('^(' + key + ') .*', '\\1 = \'%s\'' % value, line)
print(new_line, end='')
def _read_settings_file(settings_file_path_name):
sdd = {}
with open(settings_file_path_name) as ff:
for line in ff:
key, value = line.partition("=")[::2]
sdd[key.strip()] = value.strip().strip("'")
return sdd
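# Host preparation: sets the hostname, writes a placeholder AWS server-info
# file, creates a 2 GB swap file, enables root SSH password login, installs
# the RPM and pip requirement lists, then installs the AWS CLI v2, Node.js
# via nvm, and the Sentry CLI.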
def _preprocess(hostname):
_print_line_number()
_file_line_replace('/etc/sysconfig/network', '^HOSTNAME=localhost.localdomain$', 'HOSTNAME=%s' % hostname)
with open('/etc/hosts', 'a') as f:
f.write('127.0.0.1 %s\n' % hostname)
_run(['hostname', hostname])
_run(['/etc/init.d/network', 'restart'])
_print_line_number()
with open('/etc/server_info', 'w') as f:
f.write('AWS_EC2_INSTANCE_ID=i-01234567\n')
f.write('AWS_EC2_AVAILABILITY_ZONE=my-local-1a\n')
_print_line_number()
_run(['fallocate', '-l', '2G', '/swapfile'])
_run(['chmod', '600', '/swapfile'])
_run(['mkswap', '/swapfile'])
_run(['swapon', '/swapfile'])
with open('/etc/fstab', 'a') as f:
f.write('/swapfile swap swap sw 0 0\n')
_print_line_number()
subprocess.Popen(['chpasswd'], stdin=PIPE).communicate(b'root:1234qwer')
_file_line_replace('/etc/ssh/sshd_config', '^#PermitRootLogin yes$', 'PermitRootLogin yes')
_file_line_replace('/etc/ssh/sshd_config', '^PasswordAuthentication no$', 'PasswordAuthentication yes')
_run(['service', 'sshd', 'restart'])
_print_line_number()
file_path_name = '/vagrant/requirements_rpm.txt'
if os.path.exists(file_path_name):
with open(file_path_name, 'r') as f:
lines = f.readlines()
for ll in lines:
_run(['yum', '-y', 'install', ll.strip()])
_print_line_number()
_run(['/usr/bin/pip-3.8', 'install', '-U', 'pip'])
file_path_name = '/vagrant/requirements.txt'
if os.path.exists(file_path_name):
with open(file_path_name, 'r') as f:
lines = f.readlines()
for ll in lines:
_run(['pip3', 'install', ll.strip()])
_print_line_number()
_run(['pip3', 'uninstall', '-y', 'awscli'])
_run(['wget', '-O', 'awscliv2.zip', 'https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip'], cwd='/root')
_run(['unzip', 'awscliv2.zip'], cwd='/root')
_run(['./aws/install'], cwd='/root')
_print_line_number()
node_version = 'v14.18.0'
_run(['wget', 'https://raw.githubusercontent.com/nvm-sh/nvm/v0.38.0/install.sh'], cwd='/root')
_run(['chmod', '+x', 'install.sh'], cwd='/root')
_run(['./install.sh'], cwd='/root')
with open('/root/.npmrc', 'w') as f:
f.write('unsafe-perm=true\n')
        f.write('user=root\n')
with open('/root/install.sh', 'w') as f:
f.write('#!/usr/bin/env bash\n')
f.write('source /root/.nvm/nvm.sh\n')
# remove this node mirror setting if there is any problems
f.write('export NVM_NODEJS_ORG_MIRROR=https://npm.taobao.org/mirrors/node/\n')
f.write('nvm install %s\n' % node_version)
f.write('nvm alias default %s\n' % node_version)
f.write('nvm use default %s\n' % node_version)
_run(['chmod', '+x', 'install.sh'], cwd='/root')
_run(['./install.sh'], cwd='/root')
env['NVM_BIN'] = '/root/.nvm/versions/node/%s/bin' % node_version
env['NVM_CD_FLAGS'] = ''
env['NVM_DIR'] = '/root/.nvm'
env['NVM_RC_VERSION'] = ''
env['PATH'] = ('/root/.nvm/versions/node/%s/bin:' % node_version) + env['PATH']
_print_line_number()
_run(['wget', '-O', 'install.sh', 'https://sentry.io/get-cli/'], cwd='/root')
_run(['chmod', '+x', 'install.sh'], cwd='/root')
_run(['./install.sh'], cwd='/root')
_print_line_number()
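# Top-level provisioning flow: copy the bashrc, prepare the host, install the
# deploy SSH key, clone the HardBoiledSmith/johanna repository (retrying up to
# ten times), then move the local config.json into place.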
def main():
hostname = 'dv-johanna-my-local-1a-012345.localdomain'
_run(['cp', '--backup', '/vagrant/configuration/root/.bashrc', '/root/.bashrc'])
_preprocess(hostname)
_print_line_number()
_run(['mkdir', '-p', '/root/.ssh'])
_run(['mkdir', '-p', '/var/log/johanna'])
_print_line_number()
cmd_common = ['cp', '--backup']
file_list = list()
file_list.append('/root/.ssh/id_rsa')
for ff in file_list:
cmd = cmd_common + ['/vagrant/configuration' + ff, ff]
_run(cmd)
_print_line_number()
_run(['chmod', '600', '/root/.ssh/id_rsa'])
is_success = False
for ii in range(10):
print('Git clone try count: %d' % (ii + 1))
# noinspection PyBroadException
try:
# Non interactive git clone (ssh fingerprint prompt)
_run(['ssh-keyscan', 'github.com'], '/root/.ssh/known_hosts')
print(f'branch: {env["BRANCH"]}')
_run(['git', 'clone', '--depth=1', '-b', env['BRANCH'], 'git@github.com:HardBoiledSmith/johanna.git'],
cwd='/opt')
if os.path.exists('/opt/johanna'):
is_success = True
break
except Exception:
time.sleep(3)
if not is_success:
        raise Exception('git clone failed after 10 attempts')
_print_line_number()
cmd_common = ['mv']
file_list = list()
file_list.append('/opt/johanna/config.json')
local_config_path = '/vagrant/opt/johanna/config.json'
for ff in file_list:
cmd = cmd_common + [local_config_path, ff]
_run(cmd)
_print_line_number()
if __name__ == "__main__":
main()
|
base_camera.py
|
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
            # if no client has asked for a frame in the last 0.5 seconds,
            # assume there are no viewers and stop the thread
if time.time() - BaseCamera.last_access > .5:
frames_iterator.close()
print('Stopping camera thread due to inactivity.')
break
BaseCamera.thread = None
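# Example subclass (a sketch, not part of the original module): BaseCamera is
# meant to be extended by overriding frames(). This variant assumes OpenCV
# (cv2) is installed and a webcam is available at index 0.
class OpenCVCamera(BaseCamera):
    @staticmethod
    def frames():
        import cv2  # imported lazily so the base module does not require OpenCV
        camera = cv2.VideoCapture(0)
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')
        while True:
            _, img = camera.read()
            # encode each raw frame as JPEG bytes ready to stream to clients
            yield cv2.imencode('.jpg', img)[1].tobytes()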
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
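# Binds two recurrent cells (typically a FusedRNNCell and an equivalent
# explicitly stacked cell) on the same data, copies the packed/unpacked
# weights across, and checks that forward outputs and input gradients agree
# for the given grad_req.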
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
        assert(mod1.get_input_grads()[0] is None)
        assert(mod2.get_input_grads()[0] is None)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
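# Reference softmax in NumPy: subtracts the per-axis max before exponentiating
# for numerical stability, then normalizes along `axis`.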
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
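# Checks SoftmaxOutput forward/backward on a single hand-computed example:
# the forward pass must reproduce the expected softmax values and the
# gradient must equal softmax(x) minus the one-hot label.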
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
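    # For softmax cross-entropy, dL/dx = softmax(x) - onehot(label), so the difference
    # (grad_out - softmax_out) should equal -onehot(label), i.e. -1 at the label index.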
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
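    # With label smoothing, the target distribution puts (1 - alpha) on the true class and
    # alpha / (K - 1) on each other class, so dL/dx = softmax(x) - smoothed_target, which is
    # why (grad_out - softmax_out) is compared against the negated smoothed target below.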
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
    test = 2 / (4 - ((1 + data + 1) * 2 / 5) - 0.8 - (data != 0))
    npout_1 = (4 - ((1 + data_tmp + 1) * 2 / 5) - 0.8 - (data_tmp != 0))
    npout = 2 / npout_1
    check_symbolic_forward(test, [data_tmp], [npout])
    # The denominator f(x) = 4 - (x + 2) * 2/5 - 0.8 - (x != 0) has f'(x) = -2/5, so
    # d/dx [2 / f(x)] = (4/5) / f(x)^2; the incoming gradient of 2 is applied below.
    npout_grad = 2. * 2 / 5
    npout_grad = 2 * npout_grad / (npout_1 * npout_1)
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
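    # Analytical gradients of data**exp:
    #   d/d(data) = exp * data**(exp - 1),  d/d(exp) = data**exp * log(data)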
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
    res = np.dot(data_np, fc_weight_np) + fc_bias_np
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): The numeric gradient checks are skipped for the float16 data type due to
# precision issues; the analytical checks are still performed on every data type to verify correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
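    # Reference LeakyReLU variants: 'leaky' uses slope * x on the negative side (gradient slope),
    # while 'elu' uses slope * (exp(x) - 1) (gradient y + slope on the negative side).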
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): The numeric gradient checks are skipped for the float16 data type due to
# precision issues; the analytical checks are still performed on every data type to verify correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
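    # Reference PReLU: out = x for x > 0 and gamma * x otherwise. d(out)/dx is 1 on the
    # positive part and gamma on the negative part, while d(out)/d(gamma) is the negative-part
    # input summed over every axis that gamma is broadcast across (handled case by case above).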
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
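    # SELU(x) = lambda * (x if x > 0 else alpha * (exp(x) - 1)); alpha and lambda above are the
    # fixed self-normalizing constants from the SELU paper, which the 'selu' act_type is expected
    # to match here.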
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
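    # The helpers above implement the tanh approximation of GELU:
    #   GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
    # and fgelu_grad is its derivative expressed in terms of the forward output y.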
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
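    # Hard sigmoid is clip(alpha * x + beta, 0, 1): the gradient is alpha inside the linear
    # region and zero wherever the output is clipped to 0 or 1, which is what the helper returns.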
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
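    # Softsign is x / (1 + |x|); its derivative is 1 / (1 + |x|)**2, as implemented above.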
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# Check that ops handle a duplicated input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # sign() has zero gradient everywhere, so the expected input gradient is all zeros.
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    # d/dx rsqrt(x) = -1 / (2 * x^(3/2)), d/dx cos(x) = -sin(x), d/dx sin(x) = cos(x)
    npout_grad = (npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp)))
                  + npout_grad * -np.sin(data_tmp)
                  + npout_grad * np.cos(data_tmp))
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
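        # Backward of nearest upsampling sums the output gradient over each upsampled block.
        # Since the forward outputs themselves are fed back as gradients, each input element is
        # expected to be scaled by root_scale**2 * scale**(2*k), the factor asserted below.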
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
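    # _init_bilinear fills the upsampling weights with the standard bilinear interpolation
    # kernel, so the bilinear UpSampling here behaves like a fixed bilinear resize of the input.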
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
        # Bilinear upsampling takes exactly one data input and one weight input;
        # the multi-input mode is not applicable here.
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
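            # Reference batch-norm backward (standard derivation, restated for readability):
            # with x_hat = (x - mean) / sqrt(var + eps) and m elements per channel,
            #   dgamma = sum(ograd * x_hat), dbeta = sum(ograd),
            #   dx = ograd * gamma / sqrt(var + eps) + dvar * 2 * (x - mean) / m + dmean / m.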
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
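    # x_hat_helper reshapes (N, C, H, W) to (N, G, C/G, H, W) and normalizes each group over
    # its (C/G, H, W) block; note that gamma and beta in this test are per-group (shape (G,)),
    # which is the affine layout the reference implementation below assumes.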
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        return out.reshape(data.shape), mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-5, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-5, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random shapes with ndim between 1 and 5 and each dimension between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
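# Backward check: the numpy `baseline` returns gradients at the broadcast (output) shape;
# reduce_op sums them back over the broadcast axes so they can be compared element-wise with
# the gradients mxnet writes into y_1 and y_2.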
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those singularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
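# Impulse-response check for dilated convolution: a single unit spike convolved with an
# all-ones kernel must yield an output (and an input gradient) whose sum equals the number of
# kernel elements, independent of the dilation.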
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
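    # Each case is [src_shape, shape_args, reverse, expected_dst_shape]. Special values in
    # shape_args: 0 copies the corresponding input dim, -1 infers one dim from the remaining
    # size, -2 copies all remaining dims, -3 merges two consecutive dims, and -4 splits a dim
    # into the two values that follow it. With reverse=True the rules are applied from the right.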
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
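    # Each case is [lhs_shape, rhs_shape, lhs_begin, lhs_end, rhs_begin, rhs_end, dst_shape]:
    # the lhs dims in [lhs_begin, lhs_end) are replaced by the rhs dims in [rhs_begin, rhs_end),
    # with None and negative indices behaving like ordinary Python slicing.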
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
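    # Compare an mxnet reduce symbol against a numpy reference over random shapes, axes,
    # keepdims and (optionally) exclude settings; nan_prob injects NaNs to exercise the
    # nan-aware reductions, and the second lambda supplies the analytic gradient.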
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
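                    # With all-zero conv/fc weights the localisation net outputs exactly this bias,
                    # i.e. a 0.5-scaled identity affine, so the STN output should be the centre crop
                    # of the input (checked below).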
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
pass
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
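# Reference numpy forward pass for the Correlation op: for every output location the two
# zero-padded inputs are compared over a kernel_size x kernel_size patch at each displacement in
# the neighbourhood, either by multiplication or by absolute difference, normalised by the patch
# size times the channel count. The padded inputs are returned for reuse in the backward reference.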
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
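# Reference numpy backward pass mirroring correlation_forward: gradients are accumulated into the
# padded buffers and the padding is stripped before returning.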
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
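    # Expected L1-SVM gradient: with the per-class label mask y in {-1, +1}, grad = -y * 1(1 - y*x > 0).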
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
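    # Expected L2-SVM gradient: with the per-class label mask y in {-1, +1}, grad = -2*y*max(1 - y*x, 0).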
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
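# Compare mx.sym.Pad against np.pad: the flat pad_width tuple is regrouped into per-axis
# (before, after) pairs, and a numeric gradient check is run on top of the forward comparison.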
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
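# Numpy reference for InstanceNorm: per-sample, per-channel mean and variance over the spatial
# dims, followed by the usual scale (weight/gamma) and shift (bias/beta).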
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
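# Numpy references for L2Normalization: 'channel' normalises over axis 1, 'spatial' over the
# spatial dims of each (sample, channel), and 'instance' over everything but the batch axis.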
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0] // s[1], axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
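# Verify LayerNorm against a numpy reference, using both finite differences and the analytic
# gradients implemented in npy_layer_norm_grad, for grad_req 'write' and 'add'.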
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
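    # Analytic LayerNorm gradients: with x_hat = (data - mean) / std and w = out_grad * gamma / std,
    # d_gamma = sum(x_hat * out_grad) and d_beta = sum(out_grad) over the non-normalised axes, and
    # d_data = w - mean(w) - x_hat * mean(w * x_hat) along the normalised axis.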
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
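    # Note: input values with magnitude below `epsilon` are later pushed to 2 * epsilon so that
    # |x| and x / ||x|| stay smooth at the sampled points for the finite-difference gradient checks.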
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
                        # Numeric gradient check (previously disabled, see
                        # https://github.com/apache/incubator-mxnet/issues/11509)
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
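# Each reference implementation first moves the sequence axis to position 1 so the data is
# viewed as [batch, seqlen, ...], applies the op row by row using the per-batch lengths, and
# (where needed) moves the axis back.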
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
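# Generic checkers for element-wise math ops: run the MXNet symbol forward/backward on
# constant-filled inputs and compare against the supplied numpy forward call and analytic
# derivative callback(s).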
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
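# rounding() only checks the forward pass: rint/fix are piecewise constant, so their gradient
# is zero almost everywhere and a backward check would not be meaningful.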
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
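    # gt_topk computes the expected result with numpy: take the first k entries of argsort/sort
    # for ascending order, or the last k (negative indices with mode='wrap') for descending;
    # for ret_typ="mask" it scatters ones at the selected positions instead.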
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
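    # grad_helper adds 1.0 to the slice of grad_in selected by idx along `axis`, mirroring how
    # take() scatters an all-ones out_grad back into the data gradient.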
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
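        # Expected affine gradient: each grid point is A @ [x_t, y_t, 1]^T, so dL/dA equals
        # out_grad (2 x H*W) times the normalized target coordinates tmp^T (H*W x 3),
        # flattened to shape (1, 6).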
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
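# The generator sweeps both signs, the full float32 exponent range (including values that
# overflow or underflow float16) and fractions just below/above every float16 mantissa
# midpoint, so the casts below exercise round-to-nearest-even at the ties.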
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
        for in_val, model_out_val, expected_val in zip(input_np, sym_output, expected_output):
            assert (model_out_val == expected_val) or \
                   (np.isnan(model_out_val) and np.isnan(expected_val)), \
                   'amp_multicast fp16->fp32 mismatch: with input value {}, model output = {}, expected = {}'.format(
                       in_val, model_out_val, expected_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
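    # Reference behaviour: an index in [0, depth) produces a row with `on_value` at that
    # position; out-of-range (including negative) indices produce an all-`off_value` row.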
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
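    # Expected gradients for where(): the incoming gradient flows to x wherever condition is
    # non-zero and to y elsewhere; condition itself always receives a zero gradient.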
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
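    # Runs the op on a low-precision copy (idtype) and a higher-precision reference copy
    # (ref_dtype) of the same random input, optionally forcing the output dtype via `odtype`,
    # and requires both the outputs and the input gradients to agree within the tolerances.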
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
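    # Reference: softmax is taken over only the first length[i, j] entries along axis 1 and the
    # masked positions stay zero; the backward check below expects an all-zero data gradient
    # because a constant out_grad yields zero gradient through softmax.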
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
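    # The numpy reference builds an advanced-indexing tuple: the picked indices (reduced modulo
    # the axis length for mode='wrap') on `axis` and broadcast aranges on every other axis, so
    # data[exp] matches mx.nd.pick(..., keepdims=True).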
def test_pick_helper(index_type=np.int32):
for _ in range(100):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0]
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc
    exe.forward(is_train=True)
    out_train = exe.outputs[0].copy()
    # test forward without grad calc
    exe.forward(is_train=False)
    out_test = exe.outputs[0].copy()
    # make sure losses calculated with both modes are the same
    # (outputs are copied above because exe.outputs[0] is reused between forward calls)
    assert_almost_equal(out_test.asnumpy(), out_train.asnumpy())
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_test.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
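        # Reference inputs, losses and gradients below come from TensorFlow's CTC tests ('from tf');
        # blank_label selects whether the blank symbol is treated as the first or the last class.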
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
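        # Same reference data and checks as check_ctc_loss_grad, run against the contrib CTCLoss
        # operator for backward compatibility.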
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert same(a_.asnumpy(), a_real.asnumpy())
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
    # the gradient is accumulated over the T backward passes (grad_req='add')
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
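    # Registers a minimal custom operator under `name` whose forward pass is supplied by
    # `fun_forward(in_data, out_data)` and whose backward pass is a no-op; used by
    # test_custom_op_exc to inject failures at different stages.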
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# By now we only have gpu implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
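    # Mimics the deformable PSROIPooling forward pass and, wherever a sampled location would land
    # (nearly) on an integer grid point (where bilinear interpolation is not differentiable),
    # re-draws the transformation offsets; returns the adjusted offsets.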
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable,
                                # so we redo the sampling process with a new random offset
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points the bilinear interpolation function is not differentiable,
                    # so we adjust the offsets until every sampling location falls on a valid (differentiable) point
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# By now we only have gpu implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
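    # Exercises linalg.gemm and linalg.gemm2 for the given dtype: all transpose combinations,
    # batched inputs and the `axis` argument, checking forward results against numpy and,
    # when grad_check is set, numeric gradients as well.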
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
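    # Returns the symmetrized symbol 0.5 * (A + A^T), transposing over the last two axes.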
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
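    # Masks `a` to its lower (or upper) m-by-m triangle, building the triangular mask from
    # shifted one-hot rows so the whole thing stays inside the symbol graph.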
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
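    # Returns a grouped symbol computing (Q * Q^T, L * Q) from the LQ factorization of `a`,
    # so the forward check can verify orthogonality of Q and reconstruction of A.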
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If the unused output is left dangling, things break for dtype=np.float64: the backward
# gradient for the unused output then comes back as np.float32. To avoid this, the helpers below
# block its gradient and fold it into the result as a zero-valued scalar.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
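    # Returns a grouped symbol computing (U * U^T, U^T * diag(lam) * U) from syevd, used to check
    # orthogonality of the eigenvectors and reconstruction of the input matrix.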
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as they need cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
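    # Numpy reference for syevd: eigen-decomposition of a symmetric matrix with eigenvalues sorted
    # in ascending order and each eigenvector's sign fixed so its largest-magnitude entry is positive.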
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
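    # Numpy reference backward for syevd: with T = dU U^T, builds M with diagonal dL and symmetric
    # off-diagonal entries (T_ij - T_ji) / (2 * (l_i - l_j)), and returns U^T M U.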
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 10):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test triangle extraction via a full roundtrip (extract, then rebuild), since the
                    # intermediate extracted triangle uses a different element ordering than numpy's
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative error between output and input sums should stay within ratio/2 (statistical check)
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
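# Dropout with `axes` is expected to reuse a single mask along the listed axes
# (broadcast the mask), so every slice taken along such an axis must be identical
# to slice 0 -- that is what the loop below verifies.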
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
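# NumPy references for the smooth-L1 (Huber-style) operator used below:
#   f(x)  = 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
#         = |x| - 0.5 / sigma^2   otherwise
#   f'(x) = sigma^2 * x           if |x| < 1 / sigma^2
#         = sign(x)               otherwise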
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
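# Each entry: [mxnet_symbol_fn, numpy_forward_fn, numpy_gradient_fn, input_low, input_high]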
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
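# Illustrative sketch only (not wired into the test runner): how a single unary_ops
# entry maps onto the comparison helper above. The entry layout mirrors the table in
# test_unary_math_operators: [mx_fn, np_forward, np_gradient, input_low, input_high].
# The tolerances below are example values, not necessarily the ones the test uses.
def _example_single_unary_check():
    fwd_mx, fwd_np, bwd_np, lo, hi = (lambda x: mx.sym.tanh(x),
                                      lambda x: np.tanh(x),
                                      lambda x: 1. - np.tanh(x) ** 2,
                                      -4.0, 4.0)
    compare_forw_backw_unary_op('tanh', fwd_mx, fwd_np, bwd_np, (9, 10),
                                lo, hi, 1e-6, 1e-6, dtype=np.float32)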
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
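# Each entry: [mxnet_fn, numpy_forward_fn, d/dx1 fn, d/dx2 fn, x1_low, x1_high, x2_low, x2_high]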
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
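# Each output cell (oh, ow) averages the input window
# [floor(oh*H_in/H_out), ceil((oh+1)*H_in/H_out)) x [floor(ow*W_in/W_out), ceil((ow+1)*W_in/W_out)),
# so the windows cover the input even when the sizes do not divide evenly
# (adjacent windows may overlap by one element).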
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
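# NumPy reference for bilinear resizing with corner alignment: output pixel (h2, w2)
# maps to input coordinate h1r = h2 * (H_in - 1) / (H_out - 1) (and similarly for the
# width) and is the bilinear blend of the four surrounding input pixels.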
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
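# Backward reference: each incoming gradient element is scattered back onto the four
# input pixels that produced it, weighted by the same bilinear coefficients as the
# forward pass. In 'like' mode a zero gradient is also returned for the second
# ("like") input, which only supplies the target shape.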
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
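# Analytic gradient of the quadratic w.r.t. x: d/dx (a*x^2 + b*x + c) = 2*a*x + b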
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# ignore errors; this test only checks output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so skip it there for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# ignore errors; this test only checks all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so skip it there for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
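# Each entry: [mxnet_activation_fn, numpy_forward_fn, numpy_gradient_fn, input_low, input_high]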
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# Be aware that check_symbolic_forward uses a float type internally for the
# arrays, which limits the representable flat index range. Taking dim==4 and
# a data range of [0,..,100] can already cause precision issues and break
# this test.
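# For illustration (assuming float32 internally): integers are exactly representable
# only up to 2**24 = 16,777,216, while dim == 4 with values up to 100 yields flat
# indices as large as roughly 101**4 ~= 1.04e8, which cannot be represented exactly.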
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
# Note: the test runs on both GPU and CPU hosts, so we cannot assert
# a specific GPU count here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
# Note: on a CPU-only host, CUDA is sometimes unable to determine the number
# of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
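# Returns the bilinearly interpolated value at (y, x) together with the four
# (y_index, x_index, weight) triples, which the caller reuses to scatter gradients
# in the backward pass. Coordinates outside [-1, height] x [-1, width] contribute zero.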
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
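# NumPy reference for ROIAlign forward and backward: each RoI is split into PH x PW
# bins, every bin averages roi_bin_grid_h * roi_bin_grid_w bilinear samples (the grid
# equals sampling_ratio when positive, otherwise it is derived from the RoI size), and
# in the position-sensitive case bin (ph, pw) of output channel c reads input channel
# c * PH * PW + ph * PW + pw. Gradients flow to `data` only; `drois` stays zero.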
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
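# NumPy reference for depth_to_space: the channel dimension is factored into
# (blocksize, blocksize, C // blocksize**2) and the two block factors are interleaved
# into the height and width dimensions, giving shape
# (B, C // blocksize**2, H * blocksize, W * blocksize).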
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
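# NumPy reference for space_to_depth (the inverse of depth_to_space): blocks of
# blocksize x blocksize spatial positions are folded into the channel dimension,
# giving shape (B, C * blocksize**2, H // blocksize, W // blocksize).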
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
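# Illustrative sketch, not part of the original suite: with one example and two
# classes, the NumPy reference above reduces to -log of the probability assigned
# to the true class. Pure NumPy only; the helper name is hypothetical.
def _softmax_cross_entropy_two_class_sketch():
    probs = np.array([[0.25, 0.75]])
    one_hot = np.array([[0.0, 1.0]])
    loss = np.sum(-np.log(probs) * one_hot)
    assert np.isclose(loss, -np.log(0.75))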
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
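# Illustrative sketch, not part of the original suite: np.var used as the
# reference above is the population variance E[x^2] - (E[x])^2, which is the
# quantity mx.nd.moments is checked against. The helper name is hypothetical.
def _moments_identity_sketch():
    x = np.random.uniform(-1.0, 1.0, (4, 5))
    assert np.allclose(np.var(x), np.mean(x * x) - np.mean(x) ** 2)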
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data = mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
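# Illustrative sketch, not part of the original suite: with
# pooling_convention="same" the output length is ceil(input / stride), while the
# default "valid" convention with zero padding gives
# floor((input - kernel) / stride) + 1. The helper name is hypothetical.
def _same_vs_valid_length_sketch(length=10, kernel=2, stride=2):
    import math
    same = int(math.ceil(length / float(stride)))
    valid = (length - kernel) // stride + 1
    return same, valid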
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
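# Illustrative sketch, hypothetical helper not used by the test above:
# image.normalize applies (x - mean[c]) / std[c] per channel, which is what the
# hand-written expected arrays above encode.
def _normalize_reference_sketch(x, mean, std):
    # x is a (C, H, W) NumPy array; mean and std are per-channel sequences.
    mean = np.asarray(mean, dtype=x.dtype)[:, None, None]
    std = np.asarray(std, dtype=x.dtype)[:, None, None]
    return (x - mean) / std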
@with_seed()
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
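# Illustrative sketch, not part of the original suite: for a small shape the
# NumPy reference used above yields the per-position coordinates, i.e.
# expected[i, j] == [i, j]. The helper name is hypothetical.
def _index_array_reference_sketch():
    shape = (2, 3)
    mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
    expected = np.stack(mgrid, axis=-1)
    assert expected[1, 2].tolist() == [1, 2]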
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
if __name__ == '__main__':
import nose
nose.runmodule()
|
test.py
|
import pytest
import time
import psycopg2
import os.path as p
import random
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.test_tools import TSV
from random import randrange
import threading
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs = ['configs/log_conf.xml'],
user_configs = ['configs/users.xml'],
with_postgres=True, stay_alive=True)
postgres_table_template = """
CREATE TABLE IF NOT EXISTS "{}" (
key Integer NOT NULL, value Integer, PRIMARY KEY(key))
"""
postgres_table_template_2 = """
CREATE TABLE IF NOT EXISTS "{}" (
key Integer NOT NULL, value1 Integer, value2 Integer, value3 Integer, PRIMARY KEY(key))
"""
postgres_table_template_3 = """
CREATE TABLE IF NOT EXISTS "{}" (
key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL)
"""
def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database', replication=False):
if database:
conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name)
else:
conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port)
if replication:
conn_string += " replication='database'"
conn = psycopg2.connect(conn_string)
if auto_commit:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
conn.autocommit = True
return conn
def create_replication_slot(conn, slot_name='user_slot'):
cursor = conn.cursor()
cursor.execute('CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT'.format(slot_name))
result = cursor.fetchall()
print(result[0][0]) # slot name
print(result[0][1]) # start lsn
print(result[0][2]) # snapshot
return result[0][2]
def drop_replication_slot(conn, slot_name='user_slot'):
cursor = conn.cursor()
cursor.execute("select pg_drop_replication_slot('{}')".format(slot_name))
def create_postgres_db(cursor, name='postgres_database'):
cursor.execute("CREATE DATABASE {}".format(name))
def drop_postgres_db(cursor, name='postgres_database'):
cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
def create_clickhouse_postgres_db(ip, port, name='postgres_database'):
instance.query('''
CREATE DATABASE {}
ENGINE = PostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')'''.format(name, ip, port, name))
def drop_clickhouse_postgres_db(name='postgres_database'):
instance.query('DROP DATABASE {}'.format(name))
def create_materialized_db(ip, port,
materialized_database='test_database',
postgres_database='postgres_database',
settings=[]):
create_query = "CREATE DATABASE {} ENGINE = MaterializedPostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, ip, port, postgres_database)
if len(settings) > 0:
create_query += " SETTINGS "
for i in range(len(settings)):
if i != 0:
create_query += ', '
create_query += settings[i]
instance.query(create_query)
assert materialized_database in instance.query('SHOW DATABASES')
def drop_materialized_db(materialized_database='test_database'):
instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database))
assert materialized_database not in instance.query('SHOW DATABASES')
def drop_postgres_table(cursor, table_name):
cursor.execute("""DROP TABLE IF EXISTS "{}" """.format(table_name))
def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template):
drop_postgres_table(cursor, table_name)
cursor.execute(template.format(table_name))
if replica_identity_full:
cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name))
queries = [
'INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);',
'DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;',
'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;',
"UPDATE postgresql_replica_{} SET key=key+20000 WHERE key%2=0",
'INSERT INTO postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);',
'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;',
'UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;',
"UPDATE postgresql_replica_{} SET key=key+80000 WHERE key%2=1",
'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;',
'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;',
'INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);',
'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;',
'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;',
"UPDATE postgresql_replica_{} SET key=key+500000 WHERE key%2=1",
'INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);',
'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;',
"UPDATE postgresql_replica_{} SET key=key+10000000",
'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;',
'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;'
]
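# Illustrative sketch, hypothetical helper not used by the tests below: each
# entry in `queries` is a DML template with one positional placeholder for the
# table suffix, e.g. queries[0].format(3) targets postgresql_replica_3.
def _format_queries_for_table_sketch(table_suffix):
    return [q.format(table_suffix) for q in queries]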
def assert_nested_table_is_created(table_name, materialized_database='test_database'):
database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database))
while table_name not in database_tables:
time.sleep(0.2)
database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database))
assert(table_name in database_tables)
@pytest.mark.timeout(320)
def check_tables_are_synchronized(table_name, order_by='key', postgres_database='postgres_database', materialized_database='test_database'):
assert_nested_table_is_created(table_name, materialized_database)
expected = instance.query('select * from {}.{} order by {};'.format(postgres_database, table_name, order_by))
result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by))
while result != expected:
time.sleep(0.5)
result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by))
assert(result == expected)
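# Illustrative sketch, not an actual test in this file: the helpers above are
# typically combined as create table -> insert -> create the
# MaterializedPostgreSQL database -> wait until ClickHouse catches up. The table
# name below is hypothetical.
def _minimal_replication_flow_sketch(started_cluster):
    conn = get_postgres_conn(ip=started_cluster.postgres_ip,
                             port=started_cluster.postgres_port, database=True)
    cursor = conn.cursor()
    create_postgres_table(cursor, 'postgresql_replica_demo')
    instance.query("INSERT INTO postgres_database.postgresql_replica_demo SELECT number, number from numbers(10)")
    create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port)
    check_tables_are_synchronized('postgresql_replica_demo')
    drop_materialized_db()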
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
conn = get_postgres_conn(ip=cluster.postgres_ip, port=cluster.postgres_port)
cursor = conn.cursor()
create_postgres_db(cursor, 'postgres_database')
create_clickhouse_postgres_db(ip=cluster.postgres_ip, port=cluster.postgres_port)
instance.query("DROP DATABASE IF EXISTS test_database")
yield cluster
finally:
cluster.shutdown()
def test_load_and_sync_all_database_tables(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
create_postgres_table(cursor, table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
assert 'test_database' in instance.query('SHOW DATABASES')
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
cursor.execute('drop table {};'.format(table_name))
result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''')
assert(int(result) == NUM_TABLES)
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_replicating_dml(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for i in range(NUM_TABLES):
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i))
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
for i in range(NUM_TABLES):
cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} WHERE key < 50;'.format(i, i, i))
cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} * {} WHERE key >= 50;'.format(i, i, i, i))
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
for i in range(NUM_TABLES):
cursor.execute('DELETE FROM postgresql_replica_{} WHERE (value*value + {}) % 2 = 0;'.format(i, i))
cursor.execute('UPDATE postgresql_replica_{} SET value = value - (value % 7) WHERE key > 128 AND key < 512;'.format(i))
cursor.execute('DELETE FROM postgresql_replica_{} WHERE key % 7 = 1;'.format(i))
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
drop_materialized_db()
def test_different_data_types(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
cursor.execute('drop table if exists test_data_types;')
cursor.execute('drop table if exists test_array_data_type;')
cursor.execute(
'''CREATE TABLE test_data_types (
id integer PRIMARY KEY, a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial,
h timestamp, i date, j decimal(5, 5), k numeric(5, 5))''')
cursor.execute(
'''CREATE TABLE test_array_data_type
(
key Integer NOT NULL PRIMARY KEY,
a Date[] NOT NULL, -- Date
b Timestamp[] NOT NULL, -- DateTime64(6)
c real[][] NOT NULL, -- Float32
d double precision[][] NOT NULL, -- Float64
e decimal(5, 5)[][][] NOT NULL, -- Decimal32
f integer[][][] NOT NULL, -- Int32
g Text[][][][][] NOT NULL, -- String
h Integer[][][], -- Nullable(Int32)
i Char(2)[][][][], -- Nullable(String)
k Char(2)[] -- Nullable(String)
)''')
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for i in range(10):
instance.query('''
INSERT INTO postgres_database.test_data_types VALUES
({}, -32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 0.2, 0.2)'''.format(i))
check_tables_are_synchronized('test_data_types', 'id');
result = instance.query('SELECT * FROM test_database.test_data_types ORDER BY id LIMIT 1;')
assert(result == '0\t-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t0.2\t0.2\n')
for i in range(10):
col = random.choice(['a', 'b', 'c'])
cursor.execute('UPDATE test_data_types SET {} = {};'.format(col, i))
cursor.execute('''UPDATE test_data_types SET i = '2020-12-12';''')
check_tables_are_synchronized('test_data_types', 'id');
instance.query("INSERT INTO postgres_database.test_array_data_type "
"VALUES ("
"0, "
"['2000-05-12', '2000-05-12'], "
"['2000-05-12 12:12:12.012345', '2000-05-12 12:12:12.012345'], "
"[[1.12345], [1.12345], [1.12345]], "
"[[1.1234567891], [1.1234567891], [1.1234567891]], "
"[[[0.11111, 0.11111]], [[0.22222, 0.22222]], [[0.33333, 0.33333]]], "
"[[[1, 1], [1, 1]], [[3, 3], [3, 3]], [[4, 4], [5, 5]]], "
"[[[[['winx', 'winx', 'winx']]]]], "
"[[[1, NULL], [NULL, 1]], [[NULL, NULL], [NULL, NULL]], [[4, 4], [5, 5]]], "
"[[[[NULL]]]], "
"[]"
")")
expected = (
"0\t" +
"['2000-05-12','2000-05-12']\t" +
"['2000-05-12 12:12:12.012345','2000-05-12 12:12:12.012345']\t" +
"[[1.12345],[1.12345],[1.12345]]\t" +
"[[1.1234567891],[1.1234567891],[1.1234567891]]\t" +
"[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t"
"[[[1,1],[1,1]],[[3,3],[3,3]],[[4,4],[5,5]]]\t"
"[[[[['winx','winx','winx']]]]]\t"
"[[[1,NULL],[NULL,1]],[[NULL,NULL],[NULL,NULL]],[[4,4],[5,5]]]\t"
"[[[[NULL]]]]\t"
"[]\n"
)
check_tables_are_synchronized('test_array_data_type');
result = instance.query('SELECT * FROM test_database.test_array_data_type ORDER BY key;')
assert(result == expected)
drop_materialized_db()
cursor.execute('drop table if exists test_data_types;')
cursor.execute('drop table if exists test_array_data_type;')
def test_load_and_sync_subset_of_database_tables(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 10
publication_tables = ''
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(50)".format(i))
if i < int(NUM_TABLES/2):
if publication_tables != '':
publication_tables += ', '
publication_tables += table_name
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
settings=["materialized_postgresql_tables_list = '{}'".format(publication_tables)])
assert 'test_database' in instance.query('SHOW DATABASES')
time.sleep(1)
for i in range(int(NUM_TABLES/2)):
table_name = 'postgresql_replica_{}'.format(i)
assert_nested_table_is_created(table_name)
result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''')
assert(int(result) == int(NUM_TABLES/2))
database_tables = instance.query('SHOW TABLES FROM test_database')
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
if i < int(NUM_TABLES/2):
assert table_name in database_tables
else:
assert table_name not in database_tables
instance.query("INSERT INTO postgres_database.{} SELECT 50 + number, {} from numbers(100)".format(table_name, i))
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
if i < int(NUM_TABLES/2):
check_tables_are_synchronized(table_name);
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_changing_replica_identity_value(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
create_postgres_table(cursor, 'postgresql_replica');
instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)")
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)")
check_tables_are_synchronized('postgresql_replica');
cursor.execute("UPDATE postgresql_replica SET key=key-25 WHERE key<100 ")
check_tables_are_synchronized('postgresql_replica');
drop_materialized_db()
cursor.execute('drop table if exists postgresql_replica;')
def test_clickhouse_restart(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i))
instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')")
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
for i in range(NUM_TABLES):
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(50000)".format(i, i))
instance.restart_clickhouse()
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_replica_identity_index(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
create_postgres_table(cursor, 'postgresql_replica', template=postgres_table_template_3);
cursor.execute("CREATE unique INDEX idx on postgresql_replica(key1, key2);")
cursor.execute("ALTER TABLE postgresql_replica REPLICA IDENTITY USING INDEX idx")
instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(50, 10)")
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(100, 10)")
check_tables_are_synchronized('postgresql_replica', order_by='key1');
cursor.execute("UPDATE postgresql_replica SET key1=key1-25 WHERE key1<100 ")
cursor.execute("UPDATE postgresql_replica SET key2=key2-25 WHERE key2>100 ")
cursor.execute("UPDATE postgresql_replica SET value1=value1+100 WHERE key1<100 ")
cursor.execute("UPDATE postgresql_replica SET value2=value2+200 WHERE key2>100 ")
check_tables_are_synchronized('postgresql_replica', order_by='key1');
cursor.execute('DELETE FROM postgresql_replica WHERE key2<75;')
check_tables_are_synchronized('postgresql_replica', order_by='key1');
drop_materialized_db()
cursor.execute('drop table if exists postgresql_replica;')
def test_table_schema_changes(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2);
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
settings=["materialized_postgresql_allow_automatic_update = 1"])
for i in range(NUM_TABLES):
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i))
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key");
altered_table = random.randint(0, 4)
cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table))
for i in range(NUM_TABLES):
cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i))
cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i))
assert_nested_table_is_created('postgresql_replica_{}'.format(altered_table))
check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table))
print('check1 OK')
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
for i in range(NUM_TABLES):
if i != altered_table:
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i))
else:
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i))
check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table));
print('check2 OK')
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
for i in range(NUM_TABLES):
cursor.execute('drop table postgresql_replica_{};'.format(i))
instance.query("DROP DATABASE test_database")
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_many_concurrent_queries(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(10000)'.format(i))
n = [10000]
query_pool = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;',
'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;',
'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;',
'UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;',
'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;',
'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;',
'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;',
'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;',
'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;',
'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;',
'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;']
def attack(thread_id):
print('thread {}'.format(thread_id))
k = 10000
for i in range(20):
query_id = random.randrange(0, len(query_pool)-1)
table_id = random.randrange(0, 5) # num tables
# random update / delete query
cursor.execute(query_pool[query_id].format(table_id))
print("table {} query {} ok".format(table_id, query_id))
# allow some threads to do inserts (so as not to violate key constraints)
if thread_id < 5:
print("try insert table {}".format(thread_id))
instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {}*10000*({} + number), number from numbers(1000)'.format(thread_id, thread_id, k))
k += 1
print("insert table {} ok".format(thread_id))
if i == 5:
# also change primary key value
print("try update primary key {}".format(thread_id))
cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(thread_id, i+1, i+1))
print("update primary key {} ok".format(thread_id))
threads = []
threads_num = 16
for i in range(threads_num):
threads.append(threading.Thread(target=attack, args=(i,)))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
n[0] = 50000
for table_id in range(NUM_TABLES):
n[0] += 1
instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)'.format(table_id, n[0]))
#cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1))
for thread in threads:
thread.join()
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i))
count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i))
assert(int(count1) == int(count2))
print(count1, count2)
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_single_transaction(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, auto_commit=False)
cursor = conn.cursor()
create_postgres_table(cursor, 'postgresql_replica_0');
conn.commit()
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
assert_nested_table_is_created('postgresql_replica_0')
for query in queries:
print('query {}'.format(query))
cursor.execute(query.format(0))
time.sleep(5)
result = instance.query("select count() from test_database.postgresql_replica_0")
# no commit yet
assert(int(result) == 0)
conn.commit()
check_tables_are_synchronized('postgresql_replica_0');
drop_materialized_db()
cursor.execute('drop table if exists postgresql_replica_0;')
def test_virtual_columns(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
create_postgres_table(cursor, 'postgresql_replica_0');
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
settings=["materialized_postgresql_allow_automatic_update = 1"])
assert_nested_table_is_created('postgresql_replica_0')
instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number from numbers(10)")
check_tables_are_synchronized('postgresql_replica_0');
# Just check that the query works; there is no comparison against an `expected` value because _version is taken from the LSN, which differs between runs.
result = instance.query('SELECT key, value, _sign, _version FROM test_database.postgresql_replica_0;')
print(result)
cursor.execute("ALTER TABLE postgresql_replica_0 ADD COLUMN value2 integer")
instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number, number from numbers(10, 10)")
check_tables_are_synchronized('postgresql_replica_0');
result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;')
print(result)
instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number, number from numbers(20, 10)")
check_tables_are_synchronized('postgresql_replica_0');
result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;')
print(result)
drop_materialized_db()
cursor.execute('drop table if exists postgresql_replica_0;')
def test_multiple_databases(started_cluster):
drop_materialized_db('test_database_1')
drop_materialized_db('test_database_2')
NUM_TABLES = 5
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=False)
cursor = conn.cursor()
create_postgres_db(cursor, 'postgres_database_1')
create_postgres_db(cursor, 'postgres_database_2')
conn1 = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, database_name='postgres_database_1')
conn2 = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, database_name='postgres_database_2')
cursor1 = conn1.cursor()
cursor2 = conn2.cursor()
create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_1')
create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_2')
cursors = [cursor1, cursor2]
for cursor_id in range(len(cursors)):
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
create_postgres_table(cursors[cursor_id], table_name);
instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name))
print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';'''))
print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';'''))
create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port,
'test_database_1', 'postgres_database_1')
create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port,
'test_database_2', 'postgres_database_2')
cursors = [cursor1, cursor2]
for cursor_id in range(len(cursors)):
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name))
for cursor_id in range(len(cursors)):
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(
table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1));
for i in range(NUM_TABLES):
cursor1.execute('drop table if exists postgresql_replica_{};'.format(i))
for i in range(NUM_TABLES):
cursor2.execute('drop table if exists postgresql_replica_{};'.format(i))
drop_clickhouse_postgres_db('postgres_database_1')
drop_clickhouse_postgres_db('postgres_database_2')
drop_materialized_db('test_database_1')
drop_materialized_db('test_database_2')
def test_concurrent_transactions(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 6
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
def transaction(thread_id):
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, auto_commit=False)
cursor_ = conn.cursor()
for query in queries:
cursor_.execute(query.format(thread_id))
print('thread {}, query {}'.format(thread_id, query))
conn.commit()
threads = []
threads_num = 6
for i in range(threads_num):
threads.append(threading.Thread(target=transaction, args=(i,)))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for thread in threads:
time.sleep(random.uniform(0, 0.5))
thread.start()
for thread in threads:
thread.join()
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i))
count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i))
print(int(count1), int(count2), sep=' ')
assert(int(count1) == int(count2))
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_abrupt_connection_loss_while_heavy_replication(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 6
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
def transaction(thread_id):
if thread_id % 2:
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, auto_commit=True)
else:
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, auto_commit=False)
cursor_ = conn.cursor()
for query in queries:
cursor_.execute(query.format(thread_id))
print('thread {}, query {}'.format(thread_id, query))
if thread_id % 2 == 0:
conn.commit()
threads = []
threads_num = 6
for i in range(threads_num):
threads.append(threading.Thread(target=transaction, args=(i,)))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for thread in threads:
time.sleep(random.uniform(0, 0.5))
thread.start()
# Join here because it takes time for the data to reach the WAL
for thread in threads:
thread.join()
time.sleep(1)
started_cluster.pause_container('postgres1')
for i in range(NUM_TABLES):
result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i))
print(result) # Just debug
started_cluster.unpause_container('postgres1')
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
for i in range(NUM_TABLES):
result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i))
print(result) # Just debug
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_drop_database_while_replication_startup_not_finished(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
create_postgres_table(cursor, table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(100000)".format(table_name))
for i in range(6):
create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port)
time.sleep(0.5 * i)
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_restart_server_while_replication_startup_not_finished(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
create_postgres_table(cursor, table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(100000)".format(table_name))
create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port)
time.sleep(0.5)
instance.restart_clickhouse()
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table postgresql_replica_{};'.format(i))
def test_abrupt_server_restart_while_heavy_replication(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 6
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
def transaction(thread_id):
if thread_id % 2:
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, auto_commit=True)
else:
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True, auto_commit=False)
cursor_ = conn.cursor()
for query in queries:
cursor_.execute(query.format(thread_id))
print('thread {}, query {}'.format(thread_id, query))
if thread_id % 2 == 0:
conn.commit()
threads = []
threads_num = 6
for i in range(threads_num):
threads.append(threading.Thread(target=transaction, args=(i,)))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for thread in threads:
time.sleep(random.uniform(0, 0.5))
thread.start()
# Join here because it takes time for the data to reach the WAL
for thread in threads:
thread.join()
instance.restart_clickhouse()
for i in range(NUM_TABLES):
result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i))
print(result) # Just debug
for i in range(NUM_TABLES):
check_tables_are_synchronized('postgresql_replica_{}'.format(i));
for i in range(NUM_TABLES):
result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i))
print(result) # Just debug
drop_materialized_db()
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_quoting(started_cluster):
table_name = 'user'
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
create_postgres_table(cursor, table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name))
create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port)
check_tables_are_synchronized(table_name);
drop_postgres_table(cursor, table_name)
drop_materialized_db()
def test_user_managed_slots(started_cluster):
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
table_name = 'test_table'
create_postgres_table(cursor, table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name))
slot_name = 'user_slot'
replication_connection = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port,
database=True, replication=True, auto_commit=True)
snapshot = create_replication_slot(replication_connection, slot_name=slot_name)
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
settings=["materialized_postgresql_replication_slot = '{}'".format(slot_name),
"materialized_postgresql_snapshot = '{}'".format(snapshot)])
check_tables_are_synchronized(table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name))
check_tables_are_synchronized(table_name);
instance.restart_clickhouse()
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format(table_name))
check_tables_are_synchronized(table_name);
drop_postgres_table(cursor, table_name)
drop_materialized_db()
drop_replication_slot(replication_connection, slot_name)
def test_add_new_table_to_replication(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(10000)".format(i, i))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
result = instance.query("SHOW TABLES FROM test_database")
assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n")
table_name = 'postgresql_replica_5'
create_postgres_table(cursor, table_name)
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name))
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") # Check without ip
assert(result[-59:] == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n")
result = instance.query_and_get_error("ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables_list='tabl1'")
assert('Changing setting `materialized_postgresql_tables_list` is not allowed' in result)
result = instance.query_and_get_error("ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables='tabl1'")
assert('Database engine MaterializedPostgreSQL does not support setting' in result)
instance.query("ATTACH TABLE test_database.{}".format(table_name));
result = instance.query("SHOW TABLES FROM test_database")
assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\n")
check_tables_are_synchronized(table_name);
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name))
check_tables_are_synchronized(table_name);
result = instance.query_and_get_error("ATTACH TABLE test_database.{}".format(table_name));
assert('Table test_database.postgresql_replica_5 already exists' in result)
result = instance.query_and_get_error("ATTACH TABLE test_database.unknown_table");
assert('PostgreSQL table unknown_table does not exist' in result)
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(")
assert(result[-180:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5\\'\n")
table_name = 'postgresql_replica_6'
create_postgres_table(cursor, table_name)
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name))
instance.query("ATTACH TABLE test_database.{}".format(table_name));
instance.restart_clickhouse()
table_name = 'postgresql_replica_7'
create_postgres_table(cursor, table_name)
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name))
instance.query("ATTACH TABLE test_database.{}".format(table_name));
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(")
assert(result[-222:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5,postgresql_replica_6,postgresql_replica_7\\'\n")
result = instance.query("SHOW TABLES FROM test_database")
assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\npostgresql_replica_6\npostgresql_replica_7\n")
for i in range(NUM_TABLES + 3):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
for i in range(NUM_TABLES + 3):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_remove_table_from_replication(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
NUM_TABLES = 5
for i in range(NUM_TABLES):
create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(10000)".format(i, i))
create_materialized_db(ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port)
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
result = instance.query("SHOW TABLES FROM test_database")
assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n")
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(")
assert(result[-59:] == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n")
table_name = 'postgresql_replica_4'
instance.query('DETACH TABLE test_database.{}'.format(table_name));
result = instance.query_and_get_error('SELECT * FROM test_database.{}'.format(table_name))
assert("doesn't exist" in result)
result = instance.query("SHOW TABLES FROM test_database")
assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\n")
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(")
assert(result[-138:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3\\'\n")
instance.query('ATTACH TABLE test_database.{}'.format(table_name));
check_tables_are_synchronized(table_name);
for i in range(NUM_TABLES):
table_name = 'postgresql_replica_{}'.format(i)
check_tables_are_synchronized(table_name);
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(")
assert(result[-159:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n")
table_name = 'postgresql_replica_1'
instance.query('DETACH TABLE test_database.{}'.format(table_name));
result = instance.query('SHOW CREATE DATABASE test_database')
assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(")
assert(result[-138:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n")
for i in range(NUM_TABLES):
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
def test_predefined_connection_configuration(started_cluster):
drop_materialized_db()
conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True)
cursor = conn.cursor()
cursor.execute('DROP TABLE IF EXISTS test_table')
cursor.execute('CREATE TABLE test_table (key integer PRIMARY KEY, value integer)')
instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1)")
check_tables_are_synchronized("test_table");
drop_materialized_db()
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
sdca_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from threading import Thread
import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import _sdca_ops
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import _ShardedMutableHashTable
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SparseFeatureColumn
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3, 10]
_NUM_PARTITIONS = [2, 4]
def make_example_proto(feature_dict, target, value=1.0):
e = tf.train.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
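# Illustrative sketch (not part of the original test file): a quick check of
# what make_example_proto builds; the feature names and values below are
# arbitrary demonstration inputs.
def _sketch_example_proto():
  e = make_example_proto({'age': [0], 'gender': [1]}, 1.0)
  # The label is stored under 'target'; each sparse feature becomes a pair of
  # '<key>_indices' / '<key>_values' lists.
  assert list(e.features.feature['target'].float_list.value) == [1.0]
  assert list(e.features.feature['age_indices'].int64_list.value) == [0]
  assert list(e.features.feature['gender_values'].float_list.value) == [1.0]
  return e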
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target': tf.FixedLenFeature(shape=[1],
dtype=tf.float32,
default_value=0),
'age_indices': tf.VarLenFeature(dtype=tf.int64),
'age_values': tf.VarLenFeature(dtype=tf.float32),
'gender_indices': tf.VarLenFeature(dtype=tf.int64),
'gender_values': tf.VarLenFeature(dtype=tf.float32)
}
return tf.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
tf.reshape(
tf.split(1, 2, parsed['age_indices'].indices)[0], [-1]),
tf.reshape(parsed['age_indices'].values, [-1]),
tf.reshape(parsed['age_values'].values, [-1])), SparseFeatureColumn(
tf.reshape(
tf.split(1, 2, parsed['gender_indices'].indices)[0], [-1]),
tf.reshape(parsed['gender_indices'].values, [-1]),
tf.reshape(parsed['gender_values'].values, [-1]))
]
return dict(sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=tf.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_variable_dict(max_age, max_gender):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
age_weights = tf.Variable(tf.zeros([max_age + 1], dtype=tf.float32))
gender_weights = tf.Variable(tf.zeros([max_gender + 1], dtype=tf.float32))
return dict(sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variable shapes are inferred from the list of dense feature values passed as
an argument.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = tf.convert_to_tensor(dense_feature, dtype=tf.float32)
check_shape_op = tf.Assert(
tf.less_equal(tf.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with tf.control_dependencies([check_shape_op]):
dense_tensor = tf.reshape(dense_tensor,
[dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
tf.Variable(
tf.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=tf.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
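# Illustrative sketch (not part of the original test file): how the helper
# above is typically consumed. The feature values, weights and labels are
# demonstration inputs only.
def _sketch_dense_dicts():
  examples, variables = make_dense_examples_and_variables_dicts(
      dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],  # one dense column
      weights=[1.0, 1.0],
      labels=[1.0, 0.0])
  # One dense tensor of shape [batch_size=2, dimension=2] and one matching
  # weight variable of shape [2].
  return examples['dense_features'], variables['dense_features_weights']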
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return tf.cast(
tf.greater_equal(predictions, tf.ones_like(predictions) * cutoff),
dtype=tf.int32)
def get_binary_predictions_for_hinge(predictions):
return tf.cast(
tf.greater_equal(predictions, tf.zeros_like(predictions)),
dtype=tf.int32)
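# Illustrative sketch (not part of the original test file): the two helpers
# above only threshold predictions, at `cutoff` (default 0.5) for logistic
# outputs and at 0 for hinge outputs. The numbers are demonstration inputs.
def _sketch_binary_predictions():
  logistic = get_binary_predictions_for_logistic(tf.constant([0.2, 0.8]))
  hinge = get_binary_predictions_for_hinge(tf.constant([-1.5, 0.3]))
  # After evaluation: logistic -> [0, 1], hinge -> [0, 1].
  return logistic, hinge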
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testDistributedSimple(self):
# Setup test data
example_protos = [
make_example_proto({'age': [0],
'gender': [0]}, 0),
make_example_proto({'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
for num_partitions in _NUM_PARTITIONS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_table_shards=num_shards,
num_partitions=num_partitions)
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
def Minimize():
with self._single_threaded_test_session():
for _ in range(_MAX_ITERATIONS):
train_op.run()
threads = []
for _ in range(num_partitions):
threads.append(Thread(target=Minimize))
threads[-1].start()
for t in threads:
t.join()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures
# that the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 0),
# Will be used.
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0.1),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [2],
'gender': [0]}, 0),
make_example_proto(
{'age': [3],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.226487 + 0.102902,
unregularized_loss.eval(),
atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
rtol=2e-2,
atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
rtol=2e-2,
atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [0]}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({'age': [0],
'gender': [0]}, 0),
make_example_proto({'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'Found sparse feature indices out.*'):
train_op.run()
def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
tf.Variable(tf.zeros(
[1], dtype=tf.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
predictions.eval(),
rtol=0.005)
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
atol=1e-2)
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
# 2 more identical examples
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * 16 * weight^2
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Predictions should be -4.0, 48/5 due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0, -2.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
# regularization and example weights, the predictions are within:
# 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
self.assertAllClose([-10 * 40.0 / 41.0, 14.0 * 24 / 25.0],
predictions.eval(),
atol=0.01)
def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
self.assertAllClose([8.0, -10.0/3],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
# loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
class SparseFeatureColumnTest(SdcaModelTest):
"""Tests for SparseFeatureColumn.
"""
def testBasic(self):
expected_example_indices = [1, 1, 1, 2]
expected_feature_indices = [0, 1, 2, 0]
sfc = SparseFeatureColumn(expected_example_indices,
expected_feature_indices, None)
self.assertTrue(isinstance(sfc.example_indices, tf.Tensor))
self.assertTrue(isinstance(sfc.feature_indices, tf.Tensor))
self.assertEqual(sfc.feature_values, None)
with self._single_threaded_test_session():
self.assertAllEqual(expected_example_indices, sfc.example_indices.eval())
self.assertAllEqual(expected_feature_indices, sfc.feature_indices.eval())
expected_feature_values = [1.0, 2.0, 3.0, 4.0]
sfc = SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
expected_feature_values)
with self._single_threaded_test_session():
self.assertAllEqual(expected_feature_values, sfc.feature_values.eval())
class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.
This is one way of enforcing the platform-agnostic nature of SdcaFprint.
Basically we are checking against exact values and this test could be run
across different platforms. Note that it is fine for expected values to change
in the future, if the implementation of SdcaFprint changes (ie this is *not* a
frozen test).
"""
def testFprint(self):
with self._single_threaded_test_session():
in_data = tf.constant(['abc', 'very looooooong string', 'def'])
out_data = _sdca_ops.sdca_fprint(in_data)
self.assertAllEqual(
[b'\x04l\x12\xd2\xaf\xb2\x809E\x9e\x02\x13',
b'\x9f\x0f\x91P\x9aG.Ql\xf2Y\xf9',
b'"0\xe00"\x18_\x08\x12?\xa0\x17'], out_data.eval())
class ShardedMutableHashTableTest(SdcaModelTest):
"""Tests for the _ShardedMutableHashTable class."""
def testShardedMutableHashTable(self):
for num_shards in [1, 3, 10]:
with self._single_threaded_test_session():
default_val = -1
keys = tf.constant(['brain', 'salad', 'surgery'])
values = tf.constant([0, 1, 2], tf.int64)
table = _ShardedMutableHashTable(tf.string,
tf.int64,
default_val,
num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testExportSharded(self):
with self._single_threaded_test_session():
default_val = -1
num_shards = 2
keys = tf.constant(['a1', 'b1', 'c2'])
values = tf.constant([0, 1, 2], tf.int64)
table = _ShardedMutableHashTable(
tf.string, tf.int64, default_val, num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
keys_list, values_list = table.export_sharded()
self.assertAllEqual(num_shards, len(keys_list))
self.assertAllEqual(num_shards, len(values_list))
self.assertAllEqual(set([b'b1', b'c2']), set(keys_list[0].eval()))
self.assertAllEqual([b'a1'], keys_list[1].eval())
self.assertAllEqual(set([1, 2]), set(values_list[0].eval()))
self.assertAllEqual([0], values_list[1].eval())
if __name__ == '__main__':
googletest.main()
|
impl_rabbit.py
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import itertools
import math
import os
import random
import socket
import ssl
import sys
import threading
import time
import uuid
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import eventletutils
from oslo_utils import netutils
import six
from six.moves.urllib import parse
from oslo_messaging._drivers import amqp as rpc_amqp
from oslo_messaging._drivers import amqpdriver
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers import pool
from oslo_messaging._i18n import _
from oslo_messaging._i18n import _LE
from oslo_messaging._i18n import _LI
from oslo_messaging._i18n import _LW
from oslo_messaging import _utils
from oslo_messaging import exceptions
# NOTE(sileht): doesn't exist in the py2 socket module
TCP_USER_TIMEOUT = 18
rabbit_opts = [
cfg.BoolOpt('ssl',
default=False,
deprecated_name='rabbit_use_ssl',
help='Connect over SSL.'),
cfg.StrOpt('ssl_version',
default='',
deprecated_name='kombu_ssl_version',
help='SSL version to use (valid only if SSL enabled). '
'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
'TLSv1_1, and TLSv1_2 may be available on some '
'distributions.'
),
cfg.StrOpt('ssl_key_file',
default='',
deprecated_name='kombu_ssl_keyfile',
help='SSL key file (valid only if SSL enabled).'),
cfg.StrOpt('ssl_cert_file',
default='',
deprecated_name='kombu_ssl_certfile',
help='SSL cert file (valid only if SSL enabled).'),
cfg.StrOpt('ssl_ca_file',
default='',
deprecated_name='kombu_ssl_ca_certs',
help='SSL certification authority file '
'(valid only if SSL enabled).'),
cfg.FloatOpt('kombu_reconnect_delay',
default=1.0,
deprecated_group='DEFAULT',
help='How long to wait before reconnecting in response to an '
'AMQP consumer cancel notification.'),
cfg.StrOpt('kombu_compression',
help="EXPERIMENTAL: Possible values are: gzip, bz2. If not "
"set compression will not be used. This option may not "
"be available in future versions."),
cfg.IntOpt('kombu_missing_consumer_retry_timeout',
deprecated_name="kombu_reconnect_timeout",
default=60,
help='How long to wait for a missing client before abandoning '
'the attempt to send it its replies. This value should not '
'be longer than rpc_response_timeout.'),
cfg.StrOpt('kombu_failover_strategy',
choices=('round-robin', 'shuffle'),
default='round-robin',
help='Determines how the next RabbitMQ node is chosen in case '
'the one we are currently connected to becomes '
'unavailable. Takes effect only if more than one '
'RabbitMQ node is provided in config.'),
cfg.StrOpt('rabbit_host',
default='localhost',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='The RabbitMQ broker address where a single node is '
'used.'),
cfg.PortOpt('rabbit_port',
default=5672,
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='The RabbitMQ broker port where a single node is used.'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='RabbitMQ HA cluster host:port pairs.'),
cfg.StrOpt('rabbit_userid',
default='guest',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='The RabbitMQ userid.'),
cfg.StrOpt('rabbit_password',
default='guest',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='The RabbitMQ password.',
secret=True),
cfg.StrOpt('rabbit_login_method',
choices=('PLAIN', 'AMQPLAIN', 'RABBIT-CR-DEMO'),
default='AMQPLAIN',
deprecated_group='DEFAULT',
help='The RabbitMQ login method.'),
cfg.StrOpt('rabbit_virtual_host',
default='/',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='The RabbitMQ virtual host.'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='How frequently to retry connecting with RabbitMQ.'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
deprecated_group='DEFAULT',
help='How long to back off between retries when connecting '
'to RabbitMQ.'),
cfg.IntOpt('rabbit_interval_max',
default=30,
help='Maximum interval of RabbitMQ connection retries. '
'Default is 30 seconds.'),
cfg.IntOpt('rabbit_max_retries',
default=0,
deprecated_for_removal=True,
deprecated_group='DEFAULT',
help='Maximum number of RabbitMQ connection retries. '
'Default is 0 (infinite retry count).'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
deprecated_group='DEFAULT',
help='Try to use HA queues in RabbitMQ (x-ha-policy: all). '
'If you change this option, you must wipe the RabbitMQ '
'database. In RabbitMQ 3.0, queue mirroring is no longer '
'controlled by the x-ha-policy argument when declaring a '
'queue. If you just want to make sure that all queues (except '
'those with auto-generated names) are mirrored across all '
'nodes, run: '
"""\"rabbitmqctl set_policy HA '^(?!amq\.).*' """
"""'{"ha-mode": "all"}' \""""),
cfg.IntOpt('rabbit_transient_queues_ttl',
min=1,
default=1800,
help='Positive integer representing duration in seconds for '
'queue TTL (x-expires). Queues which are unused for the '
'duration of the TTL are automatically deleted. The '
'parameter affects only reply and fanout queues.'),
cfg.IntOpt('rabbit_qos_prefetch_count',
default=0,
help='Specifies the number of messages to prefetch. Setting to '
'zero allows unlimited messages.'),
cfg.IntOpt('heartbeat_timeout_threshold',
default=60,
help="Number of seconds after which the Rabbit broker is "
"considered down if heartbeat's keep-alive fails "
"(0 disable the heartbeat). EXPERIMENTAL"),
cfg.IntOpt('heartbeat_rate',
default=2,
help='How many times during the heartbeat_timeout_threshold '
'we check the heartbeat.'),
# NOTE(sileht): deprecated option since oslo_messaging 1.5.0,
cfg.BoolOpt('fake_rabbit',
default=False,
deprecated_group='DEFAULT',
help='Deprecated, use rpc_backend=kombu+memory or '
'rpc_backend=fake'),
]
LOG = logging.getLogger(__name__)
def _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we try to declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster. In RabbitMQ 3.0, queue mirroring is
no longer controlled by the x-ha-policy argument when declaring a
queue. If you just want to make sure that all queues (except those
with auto-generated names) are mirrored across all nodes, run:
rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-mode": "all"}'
If the rabbit_queue_ttl option is > 0, then the queue is
declared with the "Queue TTL" value as described here:
https://www.rabbitmq.com/ttl.html
Setting a queue TTL causes the queue to be automatically deleted
if it is unused for the TTL duration. This is a helpful safeguard
to prevent queues with zero consumers from growing without bound.
"""
args = {}
if rabbit_ha_queues:
args['x-ha-policy'] = 'all'
if rabbit_queue_ttl > 0:
args['x-expires'] = rabbit_queue_ttl * 1000
return args
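# Illustrative sketch (not part of the original driver): the queue arguments
# produced for an HA queue with the default 1800 second transient-queue TTL;
# both input values are assumptions used only for demonstration.
def _example_queue_arguments():
    # Returns {'x-ha-policy': 'all', 'x-expires': 1800000}.
    return _get_queue_arguments(rabbit_ha_queues=True, rabbit_queue_ttl=1800)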
class RabbitMessage(dict):
def __init__(self, raw_message):
super(RabbitMessage, self).__init__(
rpc_common.deserialize_msg(raw_message.payload))
LOG.trace('RabbitMessage.Init: message %s', self)
self._raw_message = raw_message
def acknowledge(self):
LOG.trace('RabbitMessage.acknowledge: message %s', self)
self._raw_message.ack()
def requeue(self):
LOG.trace('RabbitMessage.requeue: message %s', self)
self._raw_message.requeue()
class Consumer(object):
"""Consumer class."""
def __init__(self, exchange_name, queue_name, routing_key, type, durable,
exchange_auto_delete, queue_auto_delete, callback,
nowait=False, rabbit_ha_queues=None, rabbit_queue_ttl=0):
"""Init the Consumer class with the exchange_name, routing_key,
type, durable and auto_delete.
"""
self.queue_name = queue_name
self.exchange_name = exchange_name
self.routing_key = routing_key
self.exchange_auto_delete = exchange_auto_delete
self.queue_auto_delete = queue_auto_delete
self.durable = durable
self.callback = callback
self.type = type
self.nowait = nowait
self.queue_arguments = _get_queue_arguments(rabbit_ha_queues,
rabbit_queue_ttl)
self.queue = None
self._declared_on = None
self.exchange = kombu.entity.Exchange(
name=exchange_name,
type=type,
durable=self.durable,
auto_delete=self.exchange_auto_delete)
def declare(self, conn):
"""Re-declare the queue after a rabbit (re)connect."""
self.queue = kombu.entity.Queue(
name=self.queue_name,
channel=conn.channel,
exchange=self.exchange,
durable=self.durable,
auto_delete=self.queue_auto_delete,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
try:
LOG.debug('[%s] Queue.declare: %s',
conn.connection_id, self.queue_name)
self.queue.declare()
except conn.connection.channel_errors as exc:
# NOTE(jrosenboom): This exception may be triggered by a race
# condition. Simply retrying will solve the error most of the time
# and should work well enough as a workaround until the race
# condition itself can be fixed.
# See https://bugs.launchpad.net/neutron/+bug/1318721 for details.
if exc.code == 404:
self.queue.declare()
else:
raise
self._declared_on = conn.channel
def consume(self, conn, tag):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using
Connection.consume() will then process the messages,
calling the appropriate callback.
"""
# Ensure we are on the correct channel before consuming
if conn.channel != self._declared_on:
self.declare(conn)
try:
self.queue.consume(callback=self._callback,
consumer_tag=six.text_type(tag),
nowait=self.nowait)
except conn.connection.channel_errors as exc:
# We retry once because of some races that we can
# recover from before informing the deployer
# bugs.launchpad.net/oslo.messaging/+bug/1581148
# bugs.launchpad.net/oslo.messaging/+bug/1609766
# bugs.launchpad.net/neutron/+bug/1318721
# The 406 error code relates to messages that are double-ack'd.
# On any channel error, RabbitMQ closes
# the channel, but amqp-lib quietly re-opens
# it. So we must reset all tags and declare
# all consumers again.
conn._new_tags = set(conn._consumers.values())
if exc.code == 404 or (exc.code == 406 and
exc.method_name == 'Basic.ack'):
self.declare(conn)
self.queue.consume(callback=self._callback,
consumer_tag=six.text_type(tag),
nowait=self.nowait)
else:
raise
def cancel(self, tag):
LOG.trace('ConsumerBase.cancel: canceling %s', tag)
self.queue.cancel(six.text_type(tag))
def _callback(self, message):
"""Call callback with deserialized message.
Messages that are processed and ack'ed.
"""
m2p = getattr(self.queue.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
try:
self.callback(RabbitMessage(message))
except Exception:
LOG.exception(_LE("Failed to process message"
" ... skipping it."))
message.reject()
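# Illustrative sketch (not part of the original driver): constructing a
# Consumer the way higher layers of the driver would; the exchange, queue and
# routing-key names are assumptions chosen only for demonstration.
def _example_topic_consumer(on_message):
    return Consumer(exchange_name='openstack',
                    queue_name='compute',
                    routing_key='compute',
                    type='topic',
                    durable=False,
                    exchange_auto_delete=False,
                    queue_auto_delete=False,
                    callback=on_message,
                    rabbit_ha_queues=False,
                    rabbit_queue_ttl=1800)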
class DummyConnectionLock(_utils.DummyLock):
def heartbeat_acquire(self):
pass
class ConnectionLock(DummyConnectionLock):
"""Lock object to protect access to the kombu connection
This is a lock object to protect access to the kombu connection
object between the heartbeat thread and the driver thread.
There are two ways to acquire this lock:
* lock.acquire()
* lock.heartbeat_acquire()
In both cases lock.release() releases the lock.
The goal is that the heartbeat thread always has priority
for acquiring the lock. This ensures we have no heartbeat
starvation when the driver sends a lot of messages.
So when lock.heartbeat_acquire() is called, the next time the lock
is released the caller unconditionally acquires
the lock, even if someone else asked for the lock before it.
"""
def __init__(self):
self._workers_waiting = 0
self._heartbeat_waiting = False
self._lock_acquired = None
self._monitor = threading.Lock()
self._workers_locks = threading.Condition(self._monitor)
self._heartbeat_lock = threading.Condition(self._monitor)
self._get_thread_id = eventletutils.fetch_current_thread_functor()
def acquire(self):
with self._monitor:
while self._lock_acquired:
self._workers_waiting += 1
self._workers_locks.wait()
self._workers_waiting -= 1
self._lock_acquired = self._get_thread_id()
def heartbeat_acquire(self):
# NOTE(sileht): must be called only once
with self._monitor:
while self._lock_acquired is not None:
self._heartbeat_waiting = True
self._heartbeat_lock.wait()
self._heartbeat_waiting = False
self._lock_acquired = self._get_thread_id()
def release(self):
with self._monitor:
if self._lock_acquired is None:
raise RuntimeError("We can't release a not acquired lock")
thread_id = self._get_thread_id()
if self._lock_acquired != thread_id:
raise RuntimeError("We can't release lock acquired by another "
"thread/greenthread; %s vs %s" %
(self._lock_acquired, thread_id))
self._lock_acquired = None
if self._heartbeat_waiting:
self._heartbeat_lock.notify()
elif self._workers_waiting > 0:
self._workers_locks.notify()
@contextlib.contextmanager
def for_heartbeat(self):
self.heartbeat_acquire()
try:
yield
finally:
self.release()
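# Illustrative sketch (not part of the original driver): the two entry points
# of the lock above. A driver/worker thread uses acquire()/release(), while
# the heartbeat thread uses for_heartbeat() and is given priority when the
# lock is released. `send_message` and `send_heartbeat` are placeholders.
def _example_lock_usage(lock, send_message, send_heartbeat):
    lock.acquire()
    try:
        send_message()          # worker path
    finally:
        lock.release()
    with lock.for_heartbeat():  # heartbeat path, acquired with priority
        send_heartbeat()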
class Connection(object):
"""Connection object."""
pools = {}
def __init__(self, conf, url, purpose):
# NOTE(viktors): Parse config options
driver_conf = conf.oslo_messaging_rabbit
self.max_retries = driver_conf.rabbit_max_retries
self.interval_start = driver_conf.rabbit_retry_interval
self.interval_stepping = driver_conf.rabbit_retry_backoff
self.interval_max = driver_conf.rabbit_interval_max
self.login_method = driver_conf.rabbit_login_method
self.fake_rabbit = driver_conf.fake_rabbit
self.virtual_host = driver_conf.rabbit_virtual_host
self.rabbit_hosts = driver_conf.rabbit_hosts
self.rabbit_port = driver_conf.rabbit_port
self.rabbit_userid = driver_conf.rabbit_userid
self.rabbit_password = driver_conf.rabbit_password
self.rabbit_ha_queues = driver_conf.rabbit_ha_queues
self.rabbit_transient_queues_ttl = \
driver_conf.rabbit_transient_queues_ttl
self.rabbit_qos_prefetch_count = driver_conf.rabbit_qos_prefetch_count
self.heartbeat_timeout_threshold = \
driver_conf.heartbeat_timeout_threshold
self.heartbeat_rate = driver_conf.heartbeat_rate
self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay
self.amqp_durable_queues = driver_conf.amqp_durable_queues
self.amqp_auto_delete = driver_conf.amqp_auto_delete
self.ssl = driver_conf.ssl
self.kombu_missing_consumer_retry_timeout = \
driver_conf.kombu_missing_consumer_retry_timeout
self.kombu_failover_strategy = driver_conf.kombu_failover_strategy
self.kombu_compression = driver_conf.kombu_compression
if self.ssl:
self.ssl_version = driver_conf.ssl_version
self.ssl_key_file = driver_conf.ssl_key_file
self.ssl_cert_file = driver_conf.ssl_cert_file
self.ssl_ca_file = driver_conf.ssl_ca_file
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
if url.virtual_host is not None:
virtual_host = url.virtual_host
else:
virtual_host = self.virtual_host
self._url = ''
if self.fake_rabbit:
LOG.warning(_LW("Deprecated: fake_rabbit option is deprecated, "
"set rpc_backend to kombu+memory or use the fake "
"driver instead."))
self._url = 'memory://%s/' % virtual_host
elif url.hosts:
if url.transport.startswith('kombu+'):
LOG.warning(_LW('Selecting the kombu transport through the '
'transport url (%s) is an experimental feature '
'and is not yet supported.'),
url.transport)
if len(url.hosts) > 1:
random.shuffle(url.hosts)
for host in url.hosts:
transport = url.transport.replace('kombu+', '')
transport = transport.replace('rabbit', 'amqp')
self._url += '%s%s://%s:%s@%s:%s/%s' % (
";" if self._url else '',
transport,
parse.quote(host.username or ''),
parse.quote(host.password or ''),
self._parse_url_hostname(host.hostname) or '',
str(host.port or 5672),
virtual_host)
elif url.transport.startswith('kombu+'):
# NOTE(sileht): the url has a + but no hosts
# (like kombu+memory:///), pass it to kombu as-is
transport = url.transport.replace('kombu+', '')
self._url = "%s://%s" % (transport, virtual_host)
else:
if len(self.rabbit_hosts) > 1:
random.shuffle(self.rabbit_hosts)
for adr in self.rabbit_hosts:
hostname, port = netutils.parse_host_port(
adr, default_port=self.rabbit_port)
self._url += '%samqp://%s:%s@%s:%s/%s' % (
";" if self._url else '',
parse.quote(self.rabbit_userid, ''),
parse.quote(self.rabbit_password, ''),
self._parse_url_hostname(hostname), port,
virtual_host)
self._initial_pid = os.getpid()
self._consumers = {}
self._producer = None
self._new_tags = set()
self._active_tags = {}
self._tags = itertools.count(1)
# Set of exchanges and queues declared on the channel to avoid
# unnecessary redeclaration. This set is reset each time
# the connection is reset in Connection._set_current_channel.
self._declared_exchanges = set()
self._declared_queues = set()
self._consume_loop_stopped = False
self.channel = None
self.purpose = purpose
# NOTE(sileht): if purpose is PURPOSE_LISTEN
# we don't need the lock because we don't
# have a heartbeat thread
if purpose == rpc_common.PURPOSE_SEND:
self._connection_lock = ConnectionLock()
else:
self._connection_lock = DummyConnectionLock()
self.connection_id = str(uuid.uuid4())
self.name = "%s:%d:%s" % (os.path.basename(sys.argv[0]),
os.getpid(),
self.connection_id)
self.connection = kombu.connection.Connection(
self._url, ssl=self._fetch_ssl_params(),
login_method=self.login_method,
heartbeat=self.heartbeat_timeout_threshold,
failover_strategy=self.kombu_failover_strategy,
transport_options={
'confirm_publish': True,
'client_properties': {
'capabilities': {
'authentication_failure_close': True,
'connection.blocked': True,
'consumer_cancel_notify': True
},
'connection_name': self.name},
'on_blocked': self._on_connection_blocked,
'on_unblocked': self._on_connection_unblocked,
},
)
LOG.debug('[%(connection_id)s] Connecting to AMQP server on'
' %(hostname)s:%(port)s',
self._get_connection_info())
# NOTE(sileht): kombu recommends running heartbeat_check every
# second, but we hold a lock around the kombu connection, and to
# avoid holding that lock when it would mostly do nothing but wait
# for the events to drain, we run heartbeat_check and retrieve the
# server heartbeat packet only twice as often as the minimum
# required for the heartbeat to work
# (heartbeat_timeout/heartbeat_rate/2.0, default kombu
# heartbeat_rate is 2)
self._heartbeat_wait_timeout = (
float(self.heartbeat_timeout_threshold) /
float(self.heartbeat_rate) / 2.0)
self._heartbeat_support_log_emitted = False
# NOTE(sileht): just ensure the connection is set up at startup
with self._connection_lock:
self.ensure_connection()
# NOTE(sileht): if purpose is PURPOSE_LISTEN
# the consume code does the heartbeat stuff
# we don't need a thread
self._heartbeat_thread = None
if purpose == rpc_common.PURPOSE_SEND:
self._heartbeat_start()
LOG.debug('[%(connection_id)s] Connected to AMQP server on '
'%(hostname)s:%(port)s via [%(transport)s] client with'
' port %(client_port)s.',
self._get_connection_info())
# NOTE(sileht): value chosen according to the best practice from kombu
# http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
# For heartbeat, we can set a bigger timeout and check that we receive
# the heartbeat packets regularly
if self._heartbeat_supported_and_enabled():
self._poll_timeout = self._heartbeat_wait_timeout
else:
self._poll_timeout = 1
if self._url.startswith('memory://'):
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
# Fixup logging
self.connection.hostname = "memory_driver"
self.connection.port = 1234
self._poll_timeout = 0.05
# FIXME(markmc): use oslo sslutils when it is available as a library
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23
}
_OPTIONAL_PROTOCOLS = {
'sslv2': 'PROTOCOL_SSLv2',
'sslv3': 'PROTOCOL_SSLv3',
'tlsv1_1': 'PROTOCOL_TLSv1_1',
'tlsv1_2': 'PROTOCOL_TLSv1_2',
}
for protocol in _OPTIONAL_PROTOCOLS:
try:
_SSL_PROTOCOLS[protocol] = getattr(ssl,
_OPTIONAL_PROTOCOLS[protocol])
except AttributeError:
pass
@classmethod
def validate_ssl_version(cls, version):
key = version.lower()
try:
return cls._SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
def _parse_url_hostname(self, hostname):
"""Handles hostname returned from urlparse and checks whether it's
ipaddress. If it's ipaddress it ensures that it has brackets for IPv6.
"""
return '[%s]' % hostname if ':' in hostname else hostname
def _fetch_ssl_params(self):
"""Handles fetching what ssl params should be used for the connection
(if any).
"""
if self.ssl:
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.ssl_version:
ssl_params['ssl_version'] = self.validate_ssl_version(
self.ssl_version)
if self.ssl_key_file:
ssl_params['keyfile'] = self.ssl_key_file
if self.ssl_cert_file:
ssl_params['certfile'] = self.ssl_cert_file
if self.ssl_ca_file:
ssl_params['ca_certs'] = self.ssl_ca_file
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
return ssl_params or True
return False
@staticmethod
def _on_connection_blocked(reason):
LOG.error(_LE("The broker has blocked the connection: %s"), reason)
@staticmethod
def _on_connection_unblocked():
LOG.info(_LI("The broker has unblocked the connection"))
def ensure_connection(self):
# NOTE(sileht): we reset the channel and ensure
# the kombu underlying connection works
self._set_current_channel(None)
self.ensure(method=self.connection.connect)
self.set_transport_socket_timeout()
def ensure(self, method, retry=None,
recoverable_error_callback=None, error_callback=None,
timeout_is_error=True):
"""Will retry up to retry number of times.
retry = None means use the value of rabbit_max_retries
retry = -1 means to retry forever
retry = 0 means no retry
retry = N means N retries
NOTE(sileht): Must be called within the connection lock
"""
current_pid = os.getpid()
if self._initial_pid != current_pid:
LOG.warning(_LW("Process forked after connection established! "
"This can result in unpredictable behavior. "
"See: https://docs.openstack.org/oslo.messaging/"
"latest/reference/transport.html"))
self._initial_pid = current_pid
if retry is None:
retry = self.max_retries
if retry is None or retry < 0:
retry = None
def on_error(exc, interval):
LOG.debug("[%s] Received recoverable error from kombu:"
% self.connection_id,
exc_info=True)
recoverable_error_callback and recoverable_error_callback(exc)
interval = (self.kombu_reconnect_delay + interval
if self.kombu_reconnect_delay > 0
else interval)
info = {'err_str': exc, 'sleep_time': interval}
info.update(self._get_connection_info())
if 'Socket closed' in six.text_type(exc):
LOG.error(_LE('[%(connection_id)s] AMQP server'
' %(hostname)s:%(port)s closed'
' the connection. Check login credentials:'
' %(err_str)s'), info)
else:
LOG.error(_LE('[%(connection_id)s] AMQP server on '
'%(hostname)s:%(port)s is unreachable: '
'%(err_str)s. Trying again in '
'%(sleep_time)d seconds. Client port: '
'%(client_port)s'), info)
            # XXX(nic): when reconnecting to a RabbitMQ cluster
            # with mirrored queues in use, the attempt to release the
            # connection can hang "indefinitely" somewhere deep down
            # in Kombu. Blocking the thread for a bit prior to
            # release seems to kludge around the problem where it is
            # otherwise reproducible.
            # TODO(sileht): Check whether this is still useful since we
            # use kombu for the HA connection; the interval_step
            # should be sufficient, because the underlying kombu transport
            # connection object is freed.
if self.kombu_reconnect_delay > 0:
LOG.trace('Delaying reconnect for %1.1f seconds ...',
self.kombu_reconnect_delay)
time.sleep(self.kombu_reconnect_delay)
def on_reconnection(new_channel):
"""Callback invoked when the kombu reconnects and creates
a new channel, we use it the reconfigure our consumers.
"""
self._set_current_channel(new_channel)
self.set_transport_socket_timeout()
LOG.info(_LI('[%(connection_id)s] Reconnected to AMQP server on '
'%(hostname)s:%(port)s via [%(transport)s] client '
'with port %(client_port)s.'),
self._get_connection_info())
def execute_method(channel):
self._set_current_channel(channel)
method()
try:
autoretry_method = self.connection.autoretry(
execute_method, channel=self.channel,
max_retries=retry,
errback=on_error,
interval_start=self.interval_start or 1,
interval_step=self.interval_stepping,
interval_max=self.interval_max,
on_revive=on_reconnection)
ret, channel = autoretry_method()
self._set_current_channel(channel)
return ret
except kombu.exceptions.OperationalError as exc:
LOG.debug("Received recoverable error from kombu:",
exc_info=True)
error_callback and error_callback(exc)
self._set_current_channel(None)
# NOTE(sileht): number of retry exceeded and the connection
# is still broken
info = {'err_str': exc, 'retry': retry}
info.update(self.connection.info())
msg = _('Unable to connect to AMQP server on '
'%(hostname)s:%(port)s after %(retry)s '
'tries: %(err_str)s') % info
LOG.error(msg)
raise exceptions.MessageDeliveryFailure(msg)
except rpc_amqp.AMQPDestinationNotFound:
# NOTE(sileht): we must reraise this without
# trigger error_callback
raise
except Exception as exc:
error_callback and error_callback(exc)
raise
def _set_current_channel(self, new_channel):
"""Change the channel to use.
NOTE(sileht): Must be called within the connection lock
"""
if new_channel == self.channel:
return
if self.channel is not None:
self._declared_queues.clear()
self._declared_exchanges.clear()
self.connection.maybe_close_channel(self.channel)
self.channel = new_channel
if new_channel is not None:
if self.purpose == rpc_common.PURPOSE_LISTEN:
self._set_qos(new_channel)
self._producer = kombu.messaging.Producer(new_channel)
for consumer in self._consumers:
consumer.declare(self)
def _set_qos(self, channel):
"""Set QoS prefetch count on the channel"""
if self.rabbit_qos_prefetch_count > 0:
channel.basic_qos(0,
self.rabbit_qos_prefetch_count,
False)
def close(self):
"""Close/release this connection."""
self._heartbeat_stop()
if self.connection:
for consumer, tag in self._consumers.items():
if consumer.type == 'fanout':
LOG.debug('[connection close] Deleting fanout '
'queue: %s ' % consumer.queue.name)
consumer.queue.delete()
self._set_current_channel(None)
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
with self._connection_lock:
try:
for consumer, tag in self._consumers.items():
consumer.cancel(tag=tag)
except kombu.exceptions.OperationalError:
self.ensure_connection()
self._consumers.clear()
self._active_tags.clear()
self._new_tags.clear()
self._tags = itertools.count(1)
def _heartbeat_supported_and_enabled(self):
if self.heartbeat_timeout_threshold <= 0:
return False
if self.connection.supports_heartbeats:
return True
elif not self._heartbeat_support_log_emitted:
LOG.warning(_LW("Heartbeat support requested but it is not "
"supported by the kombu driver or the broker"))
self._heartbeat_support_log_emitted = True
return False
def set_transport_socket_timeout(self, timeout=None):
        # NOTE(sileht): there are some cases where the heartbeat check
        # or producer.send return only when the system socket
        # timeout is reached. kombu doesn't allow us to customise this
        # timeout, so for py-amqp we tweak it ourselves
# NOTE(dmitryme): Current approach works with amqp==1.4.9 and
# kombu==3.0.33. Once the commit below is released, we should
# try to set the socket timeout in the constructor:
# https://github.com/celery/py-amqp/pull/64
heartbeat_timeout = self.heartbeat_timeout_threshold
if self._heartbeat_supported_and_enabled():
            # NOTE(sileht): we are supposed to send a heartbeat every
            # heartbeat_timeout; waiting longer would get us disconnected,
            # so raise the timeout earlier ourselves
if timeout is None:
timeout = heartbeat_timeout
else:
timeout = min(heartbeat_timeout, timeout)
try:
sock = self.channel.connection.sock
except AttributeError as e:
# Level is set to debug because otherwise we would spam the logs
LOG.debug('[%s] Failed to get socket attribute: %s'
% (self.connection_id, str(e)))
else:
sock.settimeout(timeout)
# TCP_USER_TIMEOUT is not defined on Windows and Mac OS X
if sys.platform != 'win32' and sys.platform != 'darwin':
try:
timeout = timeout * 1000 if timeout is not None else 0
                # NOTE(gdavoian): only integers and strings are allowed
                # as socket options' values, and the TCP_USER_TIMEOUT option
                # can take only integer values, so we round up the timeout
                # to the nearest integer in order to ensure that the
                # connection is not broken before the expected timeout
sock.setsockopt(socket.IPPROTO_TCP,
TCP_USER_TIMEOUT,
int(math.ceil(timeout)))
except socket.error as error:
code = error[0]
# TCP_USER_TIMEOUT not defined on kernels <2.6.37
if code != errno.ENOPROTOOPT:
raise
@contextlib.contextmanager
def _transport_socket_timeout(self, timeout):
self.set_transport_socket_timeout(timeout)
yield
self.set_transport_socket_timeout()
def _heartbeat_check(self):
        # NOTE(sileht): we are supposed to send at least one heartbeat
        # every heartbeat_timeout_threshold, so no need to wait more
self.connection.heartbeat_check(rate=self.heartbeat_rate)
def _heartbeat_start(self):
if self._heartbeat_supported_and_enabled():
self._heartbeat_exit_event = eventletutils.Event()
self._heartbeat_thread = threading.Thread(
target=self._heartbeat_thread_job)
self._heartbeat_thread.daemon = True
self._heartbeat_thread.start()
else:
self._heartbeat_thread = None
def _heartbeat_stop(self):
if self._heartbeat_thread is not None:
self._heartbeat_exit_event.set()
self._heartbeat_thread.join()
self._heartbeat_thread = None
def _heartbeat_thread_job(self):
"""Thread that maintains inactive connections
"""
while not self._heartbeat_exit_event.is_set():
with self._connection_lock.for_heartbeat():
try:
try:
self._heartbeat_check()
                        # NOTE(sileht): We need to drain events to receive
                        # heartbeats from the broker, but without holding the
                        # connection for too long. In amqpdriver a connection
                        # is used exclusively for read or for write, so we
                        # have to do this for connections used for write;
                        # drain_events already does it for the other
                        # connections.
try:
self.connection.drain_events(timeout=0.001)
except socket.timeout:
pass
except kombu.exceptions.OperationalError as exc:
LOG.info(_LI("A recoverable connection/channel error "
"occurred, trying to reconnect: %s"), exc)
self.ensure_connection()
except Exception:
LOG.warning(_LW("Unexpected error during heartbeart "
"thread processing, retrying..."))
LOG.debug('Exception', exc_info=True)
self._heartbeat_exit_event.wait(
timeout=self._heartbeat_wait_timeout)
self._heartbeat_exit_event.clear()
def declare_consumer(self, consumer):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': consumer.routing_key, 'err_str': exc}
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s"), log_info)
def _declare_consumer():
consumer.declare(self)
tag = self._active_tags.get(consumer.queue_name)
if tag is None:
tag = next(self._tags)
self._active_tags[consumer.queue_name] = tag
self._new_tags.add(tag)
self._consumers[consumer] = tag
return consumer
with self._connection_lock:
return self.ensure(_declare_consumer,
error_callback=_connect_error)
def consume(self, timeout=None):
"""Consume from all queues/consumers."""
timer = rpc_common.DecayingTimer(duration=timeout)
timer.start()
def _raise_timeout(exc):
LOG.debug('Timed out waiting for RPC response: %s', exc)
raise rpc_common.Timeout()
def _recoverable_error_callback(exc):
if not isinstance(exc, rpc_common.Timeout):
self._new_tags = set(self._consumers.values())
timer.check_return(_raise_timeout, exc)
def _error_callback(exc):
_recoverable_error_callback(exc)
LOG.error(_LE('Failed to consume message from queue: %s'),
exc)
def _consume():
            # NOTE(sileht): in case the acknowledgment or requeue of a
            # message fails, the kombu transport can be disconnected.
            # In this case, we must redeclare our consumers, so raise
            # a recoverable error to trigger the reconnection code.
if not self.connection.connected:
raise self.connection.recoverable_connection_errors[0]
while self._new_tags:
for consumer, tag in self._consumers.items():
if tag in self._new_tags:
consumer.consume(self, tag=tag)
self._new_tags.remove(tag)
poll_timeout = (self._poll_timeout if timeout is None
else min(timeout, self._poll_timeout))
while True:
if self._consume_loop_stopped:
return
if self._heartbeat_supported_and_enabled():
self._heartbeat_check()
try:
self.connection.drain_events(timeout=poll_timeout)
return
except socket.timeout as exc:
poll_timeout = timer.check_return(
_raise_timeout, exc, maximum=self._poll_timeout)
except self.connection.channel_errors as exc:
if exc.code == 406 and exc.method_name == 'Basic.ack':
                        # NOTE(gordc): occasionally multiple workers will grab
                        # the same message and acknowledge it. if it happens, meh.
raise self.connection.recoverable_channel_errors[0]
raise
with self._connection_lock:
self.ensure(_consume,
recoverable_error_callback=_recoverable_error_callback,
error_callback=_error_callback)
def stop_consuming(self):
self._consume_loop_stopped = True
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
        responses to call/multicall
"""
consumer = Consumer(exchange_name=topic,
queue_name=topic,
routing_key=topic,
type='direct',
durable=False,
exchange_auto_delete=True,
queue_auto_delete=False,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues,
rabbit_queue_ttl=self.rabbit_transient_queues_ttl)
self.declare_consumer(consumer)
def declare_topic_consumer(self, exchange_name, topic, callback=None,
queue_name=None):
"""Create a 'topic' consumer."""
consumer = Consumer(exchange_name=exchange_name,
queue_name=queue_name or topic,
routing_key=topic,
type='topic',
durable=self.amqp_durable_queues,
exchange_auto_delete=self.amqp_auto_delete,
queue_auto_delete=self.amqp_auto_delete,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues)
self.declare_consumer(consumer)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
consumer = Consumer(exchange_name=exchange_name,
queue_name=queue_name,
routing_key=topic,
type='fanout',
durable=False,
exchange_auto_delete=True,
queue_auto_delete=False,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues,
rabbit_queue_ttl=self.rabbit_transient_queues_ttl)
self.declare_consumer(consumer)
def _ensure_publishing(self, method, exchange, msg, routing_key=None,
timeout=None, retry=None):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': exchange.name, 'err_str': exc}
LOG.error(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s"), log_info)
LOG.debug('Exception', exc_info=exc)
method = functools.partial(method, exchange, msg, routing_key, timeout)
with self._connection_lock:
self.ensure(method, retry=retry, error_callback=_error_callback)
def _get_connection_info(self):
info = self.connection.info()
client_port = None
if (self.channel and hasattr(self.channel.connection, 'sock')
and self.channel.connection.sock):
client_port = self.channel.connection.sock.getsockname()[1]
info.update({'client_port': client_port,
'connection_id': self.connection_id})
return info
def _publish(self, exchange, msg, routing_key=None, timeout=None):
"""Publish a message."""
if not (exchange.passive or exchange.name in self._declared_exchanges):
exchange(self.channel).declare()
self._declared_exchanges.add(exchange.name)
log_info = {'msg': msg,
'who': exchange or 'default',
'key': routing_key}
LOG.trace('Connection._publish: sending message %(msg)s to'
' %(who)s with routing key %(key)s', log_info)
        # NOTE(sileht): no need to wait longer, the caller expects
        # an answer before the timeout is reached
with self._transport_socket_timeout(timeout):
self._producer.publish(msg,
exchange=exchange,
routing_key=routing_key,
expiration=timeout,
compression=self.kombu_compression)
def _publish_and_creates_default_queue(self, exchange, msg,
routing_key=None, timeout=None):
"""Publisher that declares a default queue
When the exchange is missing instead of silently creates an exchange
not binded to a queue, this publisher creates a default queue
named with the routing_key
This is mainly used to not miss notification in case of nobody consumes
them yet. If the future consumer bind the default queue it can retrieve
missing messages.
_set_current_channel is responsible to cleanup the cache.
"""
queue_indentifier = (exchange.name, routing_key)
        # NOTE(sileht): We only do this once per reconnection;
        # Connection._set_current_channel() is responsible for
        # clearing this cache
if queue_indentifier not in self._declared_queues:
queue = kombu.entity.Queue(
channel=self.channel,
exchange=exchange,
durable=exchange.durable,
auto_delete=exchange.auto_delete,
name=routing_key,
routing_key=routing_key,
queue_arguments=_get_queue_arguments(self.rabbit_ha_queues, 0))
log_info = {'key': routing_key, 'exchange': exchange}
LOG.trace(
'Connection._publish_and_creates_default_queue: '
'declare queue %(key)s on %(exchange)s exchange', log_info)
queue.declare()
self._declared_queues.add(queue_indentifier)
self._publish(exchange, msg, routing_key=routing_key, timeout=timeout)
def _publish_and_raises_on_missing_exchange(self, exchange, msg,
routing_key=None,
timeout=None):
"""Publisher that raises exception if exchange is missing."""
if not exchange.passive:
            raise RuntimeError("_publish_and_raises_on_missing_exchange() "
                               "must be called with a passive exchange.")
try:
self._publish(exchange, msg, routing_key=routing_key,
timeout=timeout)
return
except self.connection.channel_errors as exc:
if exc.code == 404:
                # NOTE(noelbk/sileht):
                # If rabbit dies, the consumer can be disconnected before the
                # publisher sends, and if the consumer hasn't declared the
                # queue, the publisher will send a message to an exchange
                # that's not bound to a queue, and the message will be lost.
                # So we set passive=True on the publisher exchange and catch
                # the 404 kombu ChannelError and retry until the exchange
                # appears
                raise rpc_amqp.AMQPDestinationNotFound(
                    "exchange %s doesn't exist" % exchange.name)
raise
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=False,
auto_delete=True,
passive=True)
self._ensure_publishing(self._publish_and_raises_on_missing_exchange,
exchange, msg, routing_key=msg_id)
def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None):
"""Send a 'topic' message."""
exchange = kombu.entity.Exchange(
name=exchange_name,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete)
self._ensure_publishing(self._publish, exchange, msg,
routing_key=topic, timeout=timeout,
retry=retry)
def fanout_send(self, topic, msg, retry=None):
"""Send a 'fanout' message."""
exchange = kombu.entity.Exchange(name='%s_fanout' % topic,
type='fanout',
durable=False,
auto_delete=True)
self._ensure_publishing(self._publish, exchange, msg, retry=retry)
def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs):
"""Send a notify message on a topic."""
exchange = kombu.entity.Exchange(
name=exchange_name,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete)
self._ensure_publishing(self._publish_and_creates_default_queue,
exchange, msg, routing_key=topic, retry=retry)
class RabbitDriver(amqpdriver.AMQPDriverBase):
"""RabbitMQ Driver
The ``rabbit`` driver is the default driver used in OpenStack's
integration tests.
The driver is aliased as ``kombu`` to support upgrading existing
installations with older settings.
"""
def __init__(self, conf, url,
default_exchange=None,
allowed_remote_exmods=None):
opt_group = cfg.OptGroup(name='oslo_messaging_rabbit',
title='RabbitMQ driver options')
conf.register_group(opt_group)
conf.register_opts(rabbit_opts, group=opt_group)
conf.register_opts(rpc_amqp.amqp_opts, group=opt_group)
conf.register_opts(base.base_opts, group=opt_group)
conf = rpc_common.ConfigOptsProxy(conf, url, opt_group.name)
self.missing_destination_retry_timeout = (
conf.oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout)
self.prefetch_size = (
conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count)
# the pool configuration properties
max_size = conf.oslo_messaging_rabbit.rpc_conn_pool_size
min_size = conf.oslo_messaging_rabbit.conn_pool_min_size
ttl = conf.oslo_messaging_rabbit.conn_pool_ttl
connection_pool = pool.ConnectionPool(
conf, max_size, min_size, ttl,
url, Connection)
super(RabbitDriver, self).__init__(
conf, url,
connection_pool,
default_exchange,
allowed_remote_exmods
)
def require_features(self, requeue=True):
pass
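# Illustrative only (not part of the driver): a minimal sketch of how this
# driver is normally reached through oslo.messaging's public API rather than
# by instantiating Connection/RabbitDriver directly. The "rabbit://" URL and
# the topic name below are placeholder values.
#
#     from oslo_config import cfg
#     import oslo_messaging
#
#     conf = cfg.CONF
#     transport = oslo_messaging.get_transport(
#         conf, url="rabbit://guest:guest@localhost:5672/")
#     target = oslo_messaging.Target(topic="demo_topic")
#     client = oslo_messaging.RPCClient(transport, target)
#     # client.call({}, "ping") would round-trip through Connection above.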
|
SuperchargedBots.py
|
from __future__ import annotations
import configparser
import math
import os
from threading import Thread
from traceback import print_exc
from typing import List, Tuple, Union
import numpy as np
from rlbot.agents.base_script import BaseScript
from rlbot.messages.flat.PlayerInputChange import PlayerInputChange
from rlbot.socket.socket_manager import SocketRelay
from rlbot.utils.game_state_util import (BallState, CarState, GameState,
Physics, Vector3)
from rlbot.utils.structures.game_data_struct import GameTickPacket
BOOST_ACCEL = 991 + 2/3
BOOST_CONSUMPTION = 33 + 1/3
DEFAULT_CAR = {
"boosting": False,
"steering": False,
"total_boost": BOOST_CONSUMPTION,
"last_boost": BOOST_CONSUMPTION
}
def cap(x, low, high):
return low if x < low else (high if x > high else x)
class SuperchargedBots(BaseScript):
def __init__(self):
super().__init__("SuperchargedBots")
self.packet = None
self.last_packet_time = -1
self.time = 0
self.delta_time = -1
self.tracker = {}
self.last_ball_touch_time = -1
def set_config(self, path):
self.config = configparser.ConfigParser()
self.config.read(path)
def get_bool_from_config(self, section, option):
        return self.config.get(section, option).lower() in {"true", "1"}
def get_float_from_config(self, section, option):
return float(self.config.get(section, option))
def get_int_from_config(self, section, option):
return int(self.get_float_from_config(section, option))
def main(self):
self.set_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), "SuperchargedBots.cfg"))
self.teams = []
if self.get_bool_from_config("Options", "help_blue_team"):
self.teams.append(0)
print(f"SuperchargedBots: help_blue_team = {0 in self.teams}")
if self.get_bool_from_config("Options", "help_orange_team"):
self.teams.append(1)
print(f"SuperchargedBots: help_orange_team = {1 in self.teams}")
self.bots_only = self.get_bool_from_config("Options", "bots_only")
print(f"SuperchargedBots: bots_only = {self.bots_only}")
self.bonus_boost_accel_percent = self.get_float_from_config("Options", "bonus_boost_accel_percent") / 100
print(f"SuperchargedBots: bonus_boost_accel_percent = {self.bonus_boost_accel_percent * 100}%")
self.bonus_boost_tank = self.get_int_from_config("Options", "bonus_boost_tank")
print(f"SuperchargedBots: bonus_boost_tank = {self.bonus_boost_tank}")
self.minimum_boost = self.get_int_from_config("Options", "minimum_boost")
print(f"SuperchargedBots: minimum_boost = {self.minimum_boost}")
self.bonus_hit_percent = self.get_int_from_config("Options", "bonus_hit_percent")
print(f"SuperchargedBots: bonus_hit_percent = {self.bonus_hit_percent}")
self.demo_helper = self.get_bool_from_config("Options", "demo_helper")
print(f"SuperchargedBots: demo_helper = {self.demo_helper}")
self.socket_relay = SocketRelay()
self.socket_relay.player_input_change_handlers.append(self.input_change)
self.non_blocking_socket_relay = Thread(target=self.socket_relay.connect_and_run, args=(False, True, False))
self.non_blocking_socket_relay.start()
        while True:
try:
self.packet: GameTickPacket = self.wait_game_tick_packet()
time = self.packet.game_info.seconds_elapsed
self.delta_time = time - self.time
self.time = time
supercharged_bots = []
cars = dict()
for car_index in range(self.packet.num_cars):
car = self.packet.game_cars[car_index]
if (self.bots_only and not car.is_bot) or car.team not in self.teams:
continue
if car.name not in self.tracker:
self.tracker[car.name] = DEFAULT_CAR.copy()
supercharged_bots.append(car.name)
if not self.packet.game_info.is_round_active:
continue
if self.packet.game_info.is_kickoff_pause:
self.tracker[car.name]['total_boost'] = BOOST_CONSUMPTION
self.tracker[car.name]['last_boost'] = BOOST_CONSUMPTION
continue
velocity = None
if self.demo_helper:
for other_car_index in range(self.packet.num_cars):
other_car = self.packet.game_cars[other_car_index]
if car.team == other_car.team:
continue
car_location = Vector.from_vector(car.physics.location)
other_car_location = Vector.from_vector(other_car.physics.location)
if car_location.flat_dist(other_car_location) < 200 and abs(Vector.from_vector(car.physics.velocity).angle(other_car_location - car_location)) < 0.5:
velocity = Vector.from_vector(car.physics.velocity).flatten().scale(2300)
if self.tracker[car.name]['boosting']:
if not self.tracker[car.name]['steering'] and (car.boost > self.minimum_boost):
CP = math.cos(car.physics.rotation.pitch)
SP = math.sin(car.physics.rotation.pitch)
CY = math.cos(car.physics.rotation.yaw)
SY = math.sin(car.physics.rotation.yaw)
forward = Vector(CP*CY, CP*SY, SP)
if velocity is None:
velocity = Vector.from_vector(car.physics.velocity) + forward * (BOOST_ACCEL * self.delta_time * self.bonus_boost_accel_percent)
self.tracker[car.name]['total_boost'] -= BOOST_CONSUMPTION * self.delta_time * (100 / self.bonus_boost_tank)
boost_amount = None
if car.boost > self.minimum_boost and car.boost > self.tracker[car.name]['last_boost']:
self.tracker[car.name]['total_boost'] += car.boost - self.tracker[car.name]['last_boost']
elif car.boost < self.minimum_boost:
self.tracker[car.name]['total_boost'] = self.minimum_boost
self.tracker[car.name]['total_boost'] = cap(self.tracker[car.name]['total_boost'], 0, 100)
floored_boost = math.floor(self.tracker[car.name]['total_boost'])
if floored_boost != car.boost:
boost_amount = floored_boost
self.tracker[car.name]['last_boost'] = car.boost if boost_amount is None else boost_amount
if velocity is None and boost_amount is None:
continue
cars[car_index] = CarState(
Physics(
velocity=None if velocity is None else Vector3(*velocity)
),
boost_amount=boost_amount
)
last_ball_touch = self.packet.game_ball.latest_touch
ball = None
if last_ball_touch.time_seconds > self.last_ball_touch_time:
if (last_ball_touch.time_seconds - self.last_ball_touch_time) > 0.5:
if not self.bots_only or self.packet.game_cars[last_ball_touch.player_index].is_bot:
if last_ball_touch.team in self.teams:
bonus_hit_multiplier = self.bonus_hit_percent / 100 + 1
ball_velocity = Vector.from_vector(self.packet.game_ball.physics.velocity) * Vector(bonus_hit_multiplier, bonus_hit_multiplier, 1 / bonus_hit_multiplier)
ball = BallState(physics=Physics(
velocity=Vector3(*ball_velocity)
))
self.last_ball_touch_time = last_ball_touch.time_seconds
game_state = GameState()
if cars:
game_state.cars = cars
if ball is not None:
game_state.ball = ball
self.set_game_state(game_state)
if self.last_packet_time == -1 or self.time - self.last_packet_time >= 0.1:
self.matchcomms.outgoing_broadcast.put_nowait({
"supercharged_bots": supercharged_bots,
"supercharged_config": {
"bonus_boost_accel_percent": self.bonus_boost_accel_percent,
"bonus_boost_tank": self.bonus_boost_tank,
"minimum_boost": self.minimum_boost,
"bonus_hit_percent": self.bonus_hit_percent,
"demo_helper": self.demo_helper,
}
})
except Exception:
print_exc()
def input_change(self, change: PlayerInputChange, seconds: float, frame_num: int):
try:
game_car = self.packet.game_cars[change.PlayerIndex()]
if game_car.name not in self.tracker:
return
controller_state = change.ControllerState()
self.tracker[game_car.name]['boosting'] = controller_state.Boost()
self.tracker[game_car.name]['steering'] = (game_car.has_wheel_contact and controller_state.Steer() > 0.2) or (not game_car.has_wheel_contact and (controller_state.Yaw() > 0.2 or controller_state.Pitch() > 0.2))
except Exception:
print_exc()
# Vector supports 1D, 2D and 3D Vectors, as well as calculations between them
# Arithmetic with 1D and 2D lists/tuples isn't supported - just set the remaining values to 0 manually
# With this new setup, Vector is much faster because it's just a wrapper for numpy
class Vector:
def __init__(self, x: float = 0, y: float = 0, z: float = 0, np_arr=None):
# this is a private property - this is so all other things treat this class like a list, and so should you!
self._np = np.array([x, y, z]) if np_arr is None else np_arr
def __getitem__(self, index):
return self._np[index].item()
def __setitem__(self, index, value):
self._np[index] = value
@property
def x(self):
return self._np[0].item()
@x.setter
def x(self, value):
self._np[0] = value
@property
def y(self):
return self._np[1].item()
@y.setter
def y(self, value):
self._np[1] = value
@property
def z(self):
return self._np[2].item()
@z.setter
def z(self, value):
self._np[2] = value
# self == value
def __eq__(self, value):
if isinstance(value, float) or isinstance(value, int):
return self.magnitude() == value
if hasattr(value, "_np"):
value = value._np
return (self._np == value).all()
# len(self)
def __len__(self):
return 3 # this is a 3 dimensional vector, so we return 3
# str(self)
def __str__(self):
        # Vectors can be printed to the console
return f"[{self.x} {self.y} {self.z}]"
# repr(self)
def __repr__(self):
return f"Vector(x={self.x}, y={self.y}, z={self.z})"
# -self
def __neg__(self):
return Vector(np_arr=self._np * -1)
# self + value
def __add__(self, value):
if hasattr(value, "_np"):
value = value._np
return Vector(np_arr=self._np+value)
__radd__ = __add__
# self - value
def __sub__(self, value):
if hasattr(value, "_np"):
value = value._np
return Vector(np_arr=self._np-value)
def __rsub__(self, value):
return -self + value
# self * value
def __mul__(self, value):
if hasattr(value, "_np"):
value = value._np
return Vector(np_arr=self._np*value)
__rmul__ = __mul__
# self / value
def __truediv__(self, value):
if hasattr(value, "_np"):
value = value._np
return Vector(np_arr=self._np/value)
def __rtruediv__(self, value):
return self * (1 / value)
# round(self)
def __round__(self, decimals=0) -> Vector:
# Rounds all of the values
return Vector(np_arr=np.around(self._np, decimals=decimals))
@staticmethod
def from_vector(vec) -> Vector:
return Vector(vec.x, vec.y, vec.z)
def magnitude(self) -> float:
# Returns the length of the vector
return np.linalg.norm(self._np).item()
def _magnitude(self) -> np.float64:
# Returns the length of the vector in a numpy float 64
return np.linalg.norm(self._np)
def dot(self, value: Vector) -> float:
# Returns the dot product of two vectors
if hasattr(value, "_np"):
value = value._np
return self._np.dot(value).item()
def cross(self, value: Vector) -> Vector:
# Returns the cross product of two vectors
if hasattr(value, "_np"):
value = value._np
return Vector(np_arr=np.cross(self._np, value))
def copy(self) -> Vector:
# Returns a copy of the vector
return Vector(*self._np)
    def normalize(self, return_magnitude=False) -> Union[Tuple[Vector, float], Vector]:
# normalize() returns a Vector that shares the same direction but has a length of 1
# normalize(True) can also be used if you'd like the length of this Vector (used for optimization)
magnitude = self._magnitude()
if magnitude != 0:
norm_vec = Vector(np_arr=self._np / magnitude)
if return_magnitude:
return norm_vec, magnitude.item()
return norm_vec
if return_magnitude:
return Vector(), 0
return Vector()
def _normalize(self) -> np.ndarray:
# Normalizes a Vector and returns a numpy array
magnitude = self._magnitude()
if magnitude != 0:
return self._np / magnitude
return np.array((0, 0, 0))
def flatten(self) -> Vector:
# Sets Z (Vector[2]) to 0, making the Vector 2D
return Vector(self._np[0], self._np[1])
def angle2D(self, value: Vector) -> float:
# Returns the 2D angle between this Vector and another Vector in radians
return self.flatten().angle(value.flatten())
def angle(self, value: Vector) -> float:
# Returns the angle between this Vector and another Vector in radians
dp = np.dot(self._normalize(), value._normalize()).item()
return math.acos(-1 if dp < -1 else (1 if dp > 1 else dp))
def rotate2D(self, angle: float) -> Vector:
# Rotates this Vector by the given angle in radians
# Note that this is only 2D, in the x and y axis
return Vector((math.cos(angle)*self.x) - (math.sin(angle)*self.y), (math.sin(angle)*self.x) + (math.cos(angle)*self.y), self.z)
def clamp2D(self, start: Vector, end: Vector) -> Vector:
# Similar to integer clamping, Vector's clamp2D() forces the Vector's direction between a start and end Vector
# Such that Start < Vector < End in terms of clockwise rotation
# Note that this is only 2D, in the x and y axis
s = self._normalize()
right = np.dot(s, np.cross(end._np, (0, 0, -1))) < 0
left = np.dot(s, np.cross(start._np, (0, 0, -1))) > 0
if (right and left) if np.dot(end._np, np.cross(start._np, (0, 0, -1))) > 0 else (right or left):
return self
if np.dot(start._np, s) < np.dot(end._np, s):
return end
return start
def clamp(self, start: Vector, end: Vector) -> Vector:
# This extends clamp2D so it also clamps the vector's z
s = self.clamp2D(start, end)
if s.z < start.z:
s = s.flatten().scale(1 - start.z)
s.z = start.z
elif s.z > end.z:
s = s.flatten().scale(1 - end.z)
s.z = end.z
return s
def dist(self, value: Vector) -> float:
# Distance between 2 vectors
if hasattr(value, "_np"):
value = value._np
return np.linalg.norm(self._np - value).item()
def flat_dist(self, value: Vector) -> float:
# Distance between 2 vectors on a 2D plane
return value.flatten().dist(self.flatten())
def cap(self, low: float, high: float) -> Vector:
# Caps all values in a Vector between 'low' and 'high'
return Vector(np_arr=np.clip(self._np, low, high))
def midpoint(self, value: Vector) -> Vector:
# Midpoint of the 2 vectors
if hasattr(value, "_np"):
value = value._np
return Vector(np_arr=(self._np + value) / 2)
def scale(self, value: float) -> Vector:
        # Returns a vector with the same direction but with `value` as its magnitude
return self.normalize() * value
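# Illustrative usage of Vector (comments only; the numbers are arbitrary):
#   a = Vector(3, 4, 0)
#   a.magnitude()           # -> 5.0
#   a.scale(10)             # vector with the same direction, magnitude 10
#   a.flat_dist(Vector())   # 2D distance to the origin -> 5.0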
if __name__ == "__main__":
    supercharged_bots = SuperchargedBots()
    supercharged_bots.main()
|
stream_server.py
|
"""
Module to handle one or more clients requesting video stream.
Using async with intentional blocking sockets because I had issues using
async non-blocking sockets, and because it yielded much better performance
compared to a threading-only prototype.
"""
import asyncio as aio
import socket
import logging
import queue as q
import threading as th
from typing import List
import cv2 as cv
from .video_streamer import dataclass_objects as do
from .video_streamer import video_streamer as vs, socket_utils as su
logger = logging.getLogger(__name__)
# Max `do.StreamData` objects which the server will process and queue
# for each connected client
FRAMES_BUFFER_SIZE = 3
# Format used to encode images using `cv.imencode`
ENC = '.jpg'
# Socket the server will run on and listen for client requests
_socket: socket.socket = su.get_ipv4_tcp_socket()
_connected_clients: List[do.ClientExtras] = []
# Helpful `VideoIter` to easily get `do.StreamData` data
# from the camera or video source
_vid_iter: vs.MasterVideoIter
_server_settings: do.ServerSettings
async def _frames_q_updater():
logger.debug("`_frames_q_updater` started!")
global _vid_iter
global _connected_clients
while True:
await aio.sleep(0)
# Release camera or video source if no clients are connected
if not _connected_clients:
_vid_iter.release()
await aio.sleep(2)
continue
# Read next frame and make it available to connected clients
try:
stream_data = next(_vid_iter)
fps_text = str(_vid_iter.fps_estimator.get_estimate()).split('.')[0]
approx_clients = len(_connected_clients)
checked_clients = 0
while checked_clients < approx_clients:
await aio.sleep(0)
if _connected_clients: # Double check if clients are connected
try:
ce = _connected_clients[checked_clients]
# If client socket is closed, remove this client
if ce.sock.fileno() == -1:
a = ce.addr
logger.info(f"Client removed {a[0]+':'+str(a[1])}")
del _connected_clients[checked_clients]
raise IndexError
checked_clients += 1
# List changed, check things from the start
except IndexError:
break
# Client hasn't consumed existing data, move to next client
if ce.stream_data_q.full():
continue
custom_frame = None
# Apply stream settings if they were received
if ce.stream_settings:
custom_frame = vs.apply_stream_settings(
stream_data.frame,
ce.stream_settings,
fps_text
)
# Use stream data with default settings
if custom_frame is None:
custom_frame = stream_data.frame
# Ready stream data for network transmission
_, compressed_frame = cv.imencode(ENC, custom_frame)
custom_stream_data = do.StreamData(compressed_frame)
ce.stream_data_q.put_nowait(custom_stream_data)
except StopIteration:
            logger.debug("`_frames_q_updater` stopped!")
# Close any connected clients
try:
logger.debug("Video finished. Closing remaining client connections.")
for _ in range(len(_connected_clients)):
ce = _connected_clients.pop()
a = ce.addr
logger.info(f"Client removed {a[0] + ':' + str(a[1])}")
ce.sock.close()
except IndexError:
logger.debug("Closed all connected clients!")
break
async def _serve_clients():
logger.debug("`_serve_clients` started!")
global _connected_clients
while True:
await aio.sleep(0)
if not _connected_clients:
await aio.sleep(2)
continue
approx_clients = len(_connected_clients)
checked_clients = 0
while checked_clients < approx_clients:
await aio.sleep(0)
if not _connected_clients:
break
try:
ce = _connected_clients[checked_clients]
if ce.sock.fileno() == -1:
del _connected_clients[checked_clients]
raise IndexError
else:
checked_clients += 1
except IndexError:
break
# Send stream data
try:
await aio.sleep(0)
if not ce.stream_data_q.empty():
su.send_data(ce.sock, ce.stream_data_q.get_nowait())
except (ConnectionResetError, ConnectionAbortedError, ConnectionError) as e:
logger.error(str(e))
if ce.sock.fileno() != -1:
ce.sock.close()
def _accept_connections():
logger.debug("`_accept_connections` started!")
global _socket
global _connected_clients
global _vid_iter
while True:
client, addr = _socket.accept()
logger.info(f"New client connected: {addr[0]+':'+str(addr[1])}")
try:
client_data: do.PreStreamDataByClient = su.recv_data(client)
su.send_data(client, do.PreStreamDataByServer(_vid_iter.vid_specs))
_connected_clients.append(
do.ClientExtras(
client, addr, q.Queue(FRAMES_BUFFER_SIZE),
client_data.stream_settings
)
)
except BrokenPipeError as e:
logger.error(f"{str(e)}\nClient removed {addr[0] + ':' + str(addr[1])}")
client.close()
async def _run_server():
global _socket
global _vid_iter
global _server_settings
s = _server_settings.source
_vid_iter = vs.MasterVideoIter(s)
if not _vid_iter.is_working():
raise vs.VSError(f"Could not read from given source '{s}'")
else:
_vid_iter.release() # we don't keep resources open if no clients connect
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_socket.bind((_server_settings.ip, _server_settings.port))
_socket.listen(_server_settings.backlog)
logger.info(f"Server is listening on '{_server_settings.port}'")
accept_connections_th = th.Thread(target=_accept_connections)
    accept_connections_th.daemon = True
accept_connections_th.start()
frames_q_updater_t = aio.create_task(_frames_q_updater())
serve_clients_t = aio.create_task(_serve_clients())
await aio.gather(frames_q_updater_t, serve_clients_t)
def run_server(server_settings: do.ServerSettings):
logger.debug(f"Parameters received for `run_server`:\n{server_settings}")
global _server_settings
_server_settings = server_settings
aio.run(_run_server())
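# Illustrative only: a minimal sketch of launching this server, assuming
# `do.ServerSettings` exposes the fields used above (source, ip, port,
# backlog); the real constructor lives in video_streamer.dataclass_objects
# and its signature may differ.
#
#     settings = do.ServerSettings(source=0, ip="0.0.0.0", port=5050,
#                                  backlog=5)
#     run_server(settings)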
|
test_pyca.py
|
import sys
import time
import threading
import logging
import pytest
import numpy as np
import pyca
from conftest import test_pvs, pvbase
if sys.version_info.major >= 3:
long = int
logger = logging.getLogger(__name__)
class ConnectCallback(object):
def __init__(self, name):
self.name = name
self.connected = False
self.cev = threading.Event()
self.dcev = threading.Event()
self.lock = threading.RLock()
def wait(self, timeout=None):
logger.debug('Wait on connect callback %s', self.name)
ok = self.cev.wait(timeout=timeout)
if ok:
logger.debug('Wait complete on connect %s', self.name)
else:
logger.debug('Wait fail on connect %s', self.name)
return ok
def wait_dc(self, timeout=None):
logger.debug('Wait on disconnect callback %s', self.name)
ok = self.dcev.wait(timeout=timeout)
if ok:
logger.debug('Wait complete on disconnect %s', self.name)
else:
logger.debug('Wait fail on disconnect %s', self.name)
return ok
def __call__(self, is_connected):
logger.debug('Connect callback in %s, is_connected=%s',
self.name, is_connected)
with self.lock:
self.connected = is_connected
if self.connected:
self.cev.set()
self.dcev.clear()
else:
self.dcev.set()
self.cev.clear()
class GetCallback(object):
def __init__(self, name):
self.name = name
self.gev = threading.Event()
def wait(self, timeout=None):
logger.debug('Wait on get callback %s', self.name)
ok = self.gev.wait(timeout=timeout)
if ok:
logger.debug('Wait complete on get %s', self.name)
else:
logger.debug('Wait fail on get %s', self.name)
return ok
def reset(self):
logger.debug('Clear get callback %s', self.name)
self.gev.clear()
def __call__(self, exception=None):
logger.debug('Get callback in %s, exception=%s',
self.name, exception)
if exception is None:
self.gev.set()
def setup_pv(pvname, connect=True):
pv = pyca.capv(pvname)
pv.connect_cb = ConnectCallback(pvname)
pv.getevt_cb = GetCallback(pvname)
if connect:
pv.create_channel()
pv.connect_cb.wait(timeout=1)
return pv
def test_server_start(server):
pass
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_create_and_clear_channel(pvname):
logger.debug('test_create_and_clear_channel %s', pvname)
pv = setup_pv(pvname)
assert pv.connect_cb.connected
# No callbacks on dc
pv.clear_channel()
time.sleep(1)
with pytest.raises(pyca.pyexc):
pv.get_data(False, -1.0)
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_get_data(pvname):
logger.debug('test_get_data %s', pvname)
pv = setup_pv(pvname)
# get time vars
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
pv.getevt_cb.reset()
if not isinstance(pv.data['value'], str):
# get ctrl vars
pv.get_data(True, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
# check that the data has all the keys
all_keys = ('status', 'value', 'secs', 'nsec')
for key in all_keys:
assert key in pv.data
# check that value is not None
assert pv.data['value'] is not None
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_put_get(pvname):
logger.debug('test_put_get %s', pvname)
pv = setup_pv(pvname)
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
old_value = pv.data['value']
pv_type = type(old_value)
logger.debug('%s is of type %s', pvname, pv_type)
if pv_type in (int, long, float):
new_value = old_value + 1
elif pv_type == str:
new_value = "putget"
elif pv_type == tuple:
new_value = tuple([1] * len(old_value))
logger.debug('caput %s %s', pvname, new_value)
pv.put_data(new_value, 1.0)
pv.getevt_cb.reset()
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
recv_value = pv.data['value']
assert recv_value == new_value
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_subscribe(pvname):
logger.debug('test_subscribe %s', pvname)
pv = setup_pv(pvname)
ev = threading.Event()
def mon_cb(exception=None):
logger.debug('monitor_cb in %s, exception=%s',
pvname, exception)
if exception is None:
ev.set()
pv.monitor_cb = mon_cb
pv.subscribe_channel(pyca.DBE_VALUE | pyca.DBE_LOG | pyca.DBE_ALARM, False)
# Repeat the put/get test without the get
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
old_value = pv.data['value']
pv_type = type(old_value)
logger.debug('%s is of type %s', pvname, pv_type)
if pv_type in (int, long, float):
new_value = old_value + 1
elif pv_type == str:
new_value = "putmon"
elif pv_type == tuple:
new_value = tuple([1] * len(old_value))
logger.debug('caput %s %s', pvname, new_value)
ev.clear()
pv.put_data(new_value, 1.0)
assert ev.wait(timeout=1)
recv_value = pv.data['value']
assert recv_value == new_value
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_misc(pvname):
logger.debug('test_misc %s', pvname)
pv = setup_pv(pvname)
assert isinstance(pv.host(), str)
assert isinstance(pv.state(), int)
assert isinstance(pv.count(), int)
assert isinstance(pv.type(), str)
assert isinstance(pv.rwaccess(), int)
@pytest.mark.timeout(10)
def test_waveform():
logger.debug('test_waveform')
pv = setup_pv(pvbase + ":WAVE")
# Do as a tuple
pv.use_numpy = False
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
val = pv.data['value']
assert isinstance(val, tuple)
assert len(val) == pv.count()
pv.getevt_cb.reset()
# Do as a np.ndarray
pv.use_numpy = True
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
val = pv.data['value']
assert isinstance(val, np.ndarray)
assert len(val) == pv.count()
@pytest.mark.timeout(10)
def test_threads():
logger.debug('test_threads')
def some_thread_thing(pvname):
pyca.attach_context()
pv = setup_pv(pvname)
pv.get_data(False, -1.0)
pyca.flush_io()
assert pv.getevt_cb.wait(timeout=1)
assert isinstance(pv.data['value'], tuple)
pvname = pvbase + ":WAVE"
thread = threading.Thread(target=some_thread_thing, args=(pvname,))
thread.start()
thread.join()
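# Note: these tests are collected by pytest, e.g. `pytest test_pyca.py -v`;
# the `server` fixture and the `test_pvs`/`pvbase` values come from the
# accompanying conftest.py, and the timeout marks rely on pytest-timeout.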
|
cluster.py
|
# Copyright (c) 2021 MIT
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import time
import signal
import sys, os
import subprocess
import json
import xmlrpc.server
import xmlrpc.client
import re
import threading
from os.path import expanduser
from argparse import ArgumentParser, REMAINDER
from typing import Optional, IO, List, Any
from jobDescription import TrainingJob
import grpc
import runtime_pb2
import runtime_pb2_grpc
import torch
# import examples.vgg as vgg # TODO: this is used for debugging. Remove this later.
extra_args = [] # unparsed arguments stored here are forwarded to runtimes
HAS_EXCEPTION = False
def excepthook(args):
global HAS_EXCEPTION
print("In excepthook", args)
HAS_EXCEPTION = True
threading.excepthook = excepthook
def waitthreads(threadList):
for thread in threadList:
while thread.is_alive() and not HAS_EXCEPTION:
time.sleep(0.1)
if HAS_EXCEPTION:
sys.exit(-1)
thread.join()
class CppRuntimeProxy:
def __init__(self, addressWithPort: str):
self.channel = grpc.insecure_channel(addressWithPort) # ex) 'localhost:50051'
self.stub = runtime_pb2_grpc.RuntimeStub(self.channel)
def scheduleTraining(self, name, jobInJson, dataDir, tensorTagsInJson, jobRankToGlobalRankInJson, jobParamsInJson):
response = self.stub.ScheduleTraining(runtime_pb2.ScheduleTrainingRequest(
name=name, job_in_json=jobInJson, data_dir=dataDir,
tensor_tags_in_json=tensorTagsInJson,
job_rank_to_global_rank_in_json=jobRankToGlobalRankInJson, job_meta_params_in_json=jobParamsInJson))
print("received: " + response.message)
def poke(self):
response = self.stub.Poke(runtime_pb2.Empty())
# print("received: " + response.message)
def shutdown(self):
response = self.stub.Shutdown(runtime_pb2.Empty())
print("received: " + response.message)
def initCommBackend(self):
# response = self.stub.(runtime_pb2.Empty())
# print("received: " + response.message)
pass
# print("initCommBackend() not implemented")
def initCommNCCL(self, message, msgType, groupId, members):
response = self.stub.InitCommNCCL(runtime_pb2.InitCommNCCLMsg(
message=message, msg_type=msgType, group_id=groupId, members=members))
print("received: " + response.message)
        return response.group_id
def initCommGRPC(self, rankToIpMap):
rankToIpMapInJson = json.dumps(rankToIpMap)
print("In initCommGRPC, rankToIpMapInJson: " + rankToIpMapInJson)
response = self.stub.InitCommGRPC(runtime_pb2.InitCommGRPCRequest(
rank_to_ip_map_in_json = rankToIpMapInJson
))
print("received: " + response.message)
def initCommGroups(self, jobName, commGroupsInJson):
print("initCommGroups not implemented")
class Location:
def __init__(self, address: str, port: int, device: int, userId: str, sshKeyPath: str, isCpp: bool):
self.address = address
self.port = port
self.device = device
self.userId = userId
self.sshKeyPath = sshKeyPath
self.serverId = None
self.proxy = None
self.isCpp = isCpp
self.is_local = address == "127.0.0.1"
self.process = None
def getProxy(self, maxRetry = 180):
if self.proxy != None:
# print("getProxy() returned from cached proxy value.")
return self.proxy
# Python runtime
retryGap = 1
retryCount = 0
while retryCount < maxRetry:
try:
if self.isCpp: # CPP runtime
self.proxy = CppRuntimeProxy("%s:%d"%(self.address, self.port))
# print("cppProxy created for %s:%d"%(self.address, self.port))
else:
self.proxy = xmlrpc.client.ServerProxy("http://%s:%d/"%(self.address, self.port))
self.proxy.poke()
return self.proxy
except (ConnectionRefusedError, grpc.RpcError): # ConnectionRefusedError is for xmlrpc.
print("Cannot connect to %s:%d. Will retry in %d sec." %
(self.address, self.port, retryGap))
time.sleep(retryGap)
# retryGap += 2 # exponential back off.
retryCount += 1
assert False, "couldn't connect"
return None
def downloadFile(self, remotePath: str, localPath: str):
assert not self.is_local
print(" Downloading %s to %s at %s" % (remotePath, localPath, self.address))
kwargs = dict()
kwargs['stderr'] = subprocess.STDOUT
# sh_command = ['mkdir', '-p', localPath]
# subprocess.check_call(sh_command, **kwargs)
sh_command = ['scp', '-i', self.sshKeyPath, '%s@%s:%s' % (self.userId, self.address, remotePath), localPath]
subprocess.check_call(sh_command, **kwargs)
def uploadFile(self, localFilePath, remotePath):
assert not self.is_local
print(" Uploading %s to %s at %s" % (localFilePath, remotePath, self.address))
kwargs = dict()
# kwargs['shell'] = True
kwargs['stderr'] = subprocess.STDOUT
sh_command = ['scp', '-i', self.sshKeyPath, localFilePath, '%s@%s:%s' % (self.userId, self.address, remotePath)]
subprocess.check_call(sh_command, **kwargs)
def rsh(self, command):
kwargs = dict()
kwargs['stderr'] = subprocess.STDOUT
# sh_command = ['ssh', '-v', '-i', '~/.ssh/ulma-sjp.pem', 'ubuntu@%s' % self, '%s' % command]
if self.is_local:
sh_command = command
kwargs["shell"] = True
else:
sh_command = ['ssh', '-i', self.sshKeyPath, '-o', 'StrictHostKeyChecking=no', '%s@%s' % (self.userId, self.address), '%s' % command]
try:
subprocess.check_call(sh_command, **kwargs)
        except subprocess.CalledProcessError as e:
            print("Command failed: %s" % e)
            sys.exit(1)
return
def __monitor(self):
self.process.wait()
sys.exit(0)
def rshAsync(self, command, **kwargs):
print("Sending cmd: %s" % command)
if self.is_local:
sh_command = command
kwargs["shell"] = True
else:
sh_command = ['ssh', '-i', self.sshKeyPath, '-o StrictHostKeyChecking=no', '%s@%s' % (self.userId, self.address),
'%s' % command]
self.process = subprocess.Popen(sh_command, **kwargs)
t = threading.Thread(target=Location.__monitor, args=(self,), daemon=True)
t.start()
return self.process
def upSync(self, localPath, remotePath):
if self.is_local:
assert False
return
try:
subprocess.check_call(['rsync', '-e', 'ssh -i %s -o StrictHostKeyChecking=no' % self.sshKeyPath,
'-rh', "--exclude=*__pycache__", localPath, "%s@%s:%s" % (self.userId, self.address, remotePath)],
stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            print("Command failed: %s" % e)
            sys.exit(1)
class ClusterCoordinator(xmlrpc.server.SimpleXMLRPCServer):
""" GPU cluster coordinator. It accepts training jobs from clients and schedule them to runtimes. """
def __init__(self, addrToBind: str, portToBind: int, locations: List[Location], workDir: str, be_batch_size: int):
super(ClusterCoordinator, self).__init__((addrToBind, portToBind))
self.myAddr = addrToBind
self.myPort = portToBind
self.locations = locations
self.workDir = workDir
self.processes = [] # from subprocess calls used for launching runtime.
self.nextTagStartOffset = 1
self.be_batch_size = be_batch_size
self.commGroups = set()
self.ongoingJobs = {} # Dict of contexts of ongoing jobs. Indexed by job name.
f = open("runtimeResult.data", "w")
f.close()
def _dispatch(self, method, params):
""" Custom dispatcher for XML-RPC server. """
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC for security.
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
######################################################
## RPC handlers
######################################################
def export_poke(self):
return 'Returned from poke at %s' % self.myAddr
def export_scheduleTraining(self, jobName: str, trainingJobInJSON: str, runbe):
job = TrainingJob("test", None, None, 0, 0, "")
job.loadJSON(trainingJobInJSON)
print("received job")
gpusUsed = job.getGpusUsed()
moduleDescList = [job.dumpSingleRunnableModule(rank) for rank in range(gpusUsed)]
tensorTags = self.buildCommTensorTags(moduleDescList)
tensorTagsInJson = json.dumps(tensorTags)
for rank in range(gpusUsed):
with open(f"/tmp/rank{rank}.json", "wb") as f:
f.write(bytes(moduleDescList[rank].encode("utf-8")))
commSets = self.buildNeededCommGroups(moduleDescList)
for s in commSets:
self.initCommBackendAll("nccl", s)
jobRankToGlobalRank = list(range(gpusUsed))
jobRankToGlobalRankInJson = json.dumps(jobRankToGlobalRank)
        # TODO: should pick locations that don't have another priority job scheduled.
if len(self.locations) < gpusUsed:
return "Not enough servers available. %d gpus available while %d needed" % (len(self.locations), gpusUsed)
jobParams = {
"run_with_be": runbe,
"nr_gpus": gpusUsed,
"cifar_training": "cifar" in jobName,
"lossfn": "CrossEntropyLoss" if "gpt2" in jobName else "NLL",
}
jobParamsInJson = json.dumps(jobParams)
threadList = []
def requestScheduleTraining(proxy, jobInJson):
proxy.scheduleTraining(jobName, jobInJson, "SYNTHETIC", tensorTagsInJson, jobRankToGlobalRankInJson, jobParamsInJson)
for rank in range(gpusUsed):
location = self.locations[rank]
moduleDesc = moduleDescList[rank]
thread = threading.Thread(name='reqScheTrain%d'%rank, target=requestScheduleTraining, args=(location.getProxy(), moduleDesc))
threadList.append(thread)
thread.start()
waitthreads(threadList)
self.ongoingJobs[jobName] = {"iterTime": 0, "gpuMsec": 0, "gpusUsed": gpusUsed, "gpusFinished": 0, "globalBatchSize": job.globalBatchSize}
self.ongoingJobs[jobName].update({"beImagesPerIter": 0.0, "idleMsPerIter": 0.0})
# for rank in range(gpusUsed):
# location = self.locations[rank]
# moduleDesc = moduleDescList[rank] # job.dumpSingleRunnableModule(rank)
# print(location.getProxy().scheduleTraining(jobName, moduleDesc, "SYNTHETIC", tensorTagsInJson, jobRankToGlobalRankInJson))
return 'done'
def export_notifyTrainingFinished(self, runtimeAddress: str, name: str, beImagesPerIter: float, idleMsPerIter: float, remainingJobCount: int, fpTime: float, bpTime: float, iterTime: float):
print("Training for %s is completed at %s. (%d jobs are remaining) fp: %3.1f bp: %3.1f iterTime: %3.1f" % (name, runtimeAddress, remainingJobCount, fpTime, bpTime, iterTime))
iterTime /= 1000
self.ongoingJobs[name]["iterTime"] = max(self.ongoingJobs[name]["iterTime"], iterTime)
self.ongoingJobs[name]["gpuMsec"] += (fpTime + bpTime) / 1000
self.ongoingJobs[name]["gpusFinished"] += 1
self.ongoingJobs[name]["beImagesPerIter"] += beImagesPerIter
self.ongoingJobs[name]["idleMsPerIter"] += idleMsPerIter
if self.ongoingJobs[name]["gpusFinished"] == self.ongoingJobs[name]["gpusUsed"]:
toprints = [
"{globalBatchSize:2}", "{gpusUsed:2}", "{iterTime:4.1f}",
"{gpuMsec:4.1f}", "{beImagesPerIter:3.1f}",
"{idleMsPerIter:3.1f}"
]
print("Training for {} is completed entirely.".format(name))
cols = ["GlobalBatchSize", "GpusUsed", "IterTime", "GpuMsec", "BeImagesPerIter", "IdleMsPerIter"]
print(" " + " ".join(cols))
dataline = " " + " ".join(toprints).format(**self.ongoingJobs[name])
print(dataline)
f = open("runtimeResult.data", "a")
f.write(dataline + "\n")
f.close()
return 'done'
def export_addGpuNode(self):
print("NOT YET IMPLEMENTED.")
######################################################
## Internal helper methods
######################################################
def buildCommTensorTags(self, moduleDescList):
# TODO: need tag allocator that can recycle tags.
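        # Illustrative example (numbers made up): an xfer with
        # xferSamples == 4 gets tag 0 and its "_back" counterpart tag 4;
        # the next xfer then starts at tag 8, and so on.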
tag = 0
tensorTags = {}
for moduleDesc in moduleDescList:
spec = json.loads(moduleDesc)
for ldsc in spec["layers"]:
if "xfers" in ldsc: # either sender or receiver need to assign tag.
for item in ldsc["xfers"]:
tensorTags[item["name"]] = tag
tag += item["prop"]["xferSamples"]
tensorTags[item["name"] + "_back"] = tag
tag += item["prop"]["xferSamples"]
return tensorTags
def buildNeededCommGroups(self, moduleDescList):
groups = set()
desc = json.loads(moduleDescList[0])
for l in desc['layers']:
activeset = tuple(sorted(l['gpuAssignment']))
if len(activeset) > 1:
groups.add(activeset)
return list(groups)
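    # Illustrative example (values made up): layers assigned to GPUs [0, 1],
    # [1, 2] and [2] yield the comm groups [(0, 1), (1, 2)] (in set order);
    # single-GPU layers need no communication group.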
######################################################
## Runtime cluster management
######################################################
def installPackages(self):
""" Install required software at each runtime server """
pipPackages = ["torch", "jsonpickle", "torchvision"]
# "pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html"]
for location in self.locations:
for pipPackage in pipPackages:
location.rsh("pip install %s" % pipPackage)
def launchRuntimeAll(self, c10dBackend: str, profile: bool, cppRuntime: bool, manualLaunch: bool):
""" Launch runtime at all remote locations. Also registers the sighandler
that cleanly shuts down all remote runtime servers.
"""
# Using the absolute path for compatibility with C++ runtime.
logdir = args.logdir
if not logdir:
logdir = os.getcwd() + "/logs/"
upSyncedAddrs = set()
for i, location in enumerate(self.locations):
if (location.address not in upSyncedAddrs):
# TODO: skip if location's addr is same as the current node.
# location.upSync(".", self.workDir)
upSyncedAddrs.add(location.address)
# pass master ip and port.
stdoutFp = open(f"{logdir}/runtime%d.out"%i, "a", buffering=1)
stderrFp = open(f"{logdir}/runtime%d.err"%i, "a", buffering=1)
nsysPrefix = ""
if "--cuda_profile" in extra_args:# and location.device == 0: # Only run 1 nsys per host.
nsysPrefix = "nsys profile -f true -o net%d -c cudaProfilerApi -t cuda,nvtx --export sqlite " % i # -s none
if manualLaunch:
print("Skipping ssh launching runtime. Must have launched them manually.")
elif cppRuntime:
self.processes.append(location.rshAsync(
f"CUDA_VISIBLE_DEVICES={location.device} {nsysPrefix} {self.workDir}/csrc/build/runtime" + \
" --myAddr %s:%d --device 0 --c10dBackend %s --rank %d --worldSize %d --logdir %s --be_batch_size %d %s" % \
(location.address, location.port, c10dBackend, i, len(self.locations), logdir, self.be_batch_size, " ".join(extra_args)) #+ \
, stdout=stdoutFp, stderr=stderrFp))
else:
self.processes.append(location.rshAsync(
# nsysPrefix + "python3 " + self.workDir + "runtime.py" + \
"source ~/.profile; " + nsysPrefix + "python3 " + self.workDir + "runtime.py" + \
" --coordinatorAddr %s:%d --myAddr %s:%d --device %d --c10dBackend %s --rank %d --worldSize %d --be_batch_size %d %s" % \
(self.myAddr, self.myPort, location.address, location.port, location.device, c10dBackend, i, len(self.locations), self.be_batch_size, "--profile" if profile else "") #+ \
, stdout=stdoutFp, stderr=stderrFp))
sig_names = {2: "SIGINT", 15: "SIGTERM"}
last_return_code = None
def sigkill_handler(signum, frame):
print("signum:%d Trying to shutdown all runtime." % signum)
self.shutdownRuntimeAll()
# self.waitForRuntimeAll()
for process in self.processes:
print(f"Killing subprocess {process.pid}")
try:
process.terminate()
# process.kill()
except Exception:
pass
if last_return_code is not None:
raise subprocess.CalledProcessError(returncode=last_return_code, cmd=cmd)
if signum in sig_names:
print(f"Main process received {sig_names[signum]}, exiting")
sys.exit(1)
signal.signal(signal.SIGINT, sigkill_handler)
# signal.signal(signal.SIGTERM, sigkill_handler)
time.sleep(2) ## + (15 if profile else 0))
for location in self.locations:
proxy = location.getProxy()
proxy.poke()
def shutdownRuntimeAll(self):
""" Ask all remote runtime servers to stop. Returns after all servers ack the shutdown request. """
for location in self.locations:
try:
proxy = location.getProxy(maxRetry=1)
                if proxy is not None:
print(proxy.shutdown())
# print(location.getProxy(maxRetry=1).shutdown())
except xmlrpc.client.Fault:
print("pipe broken while shuting down %s" % location.address)
except grpc.RpcError:
print("GRPC error while shuting down %s" % location.address)
def initCommBackendAll(self, c10dBackend, commGroupSet):
assert(sorted(commGroupSet) == list(commGroupSet))
if tuple(commGroupSet) in self.commGroups:
return
self.commGroups.add(tuple(commGroupSet))
if c10dBackend == "nccl":
group_id = self.locations[commGroupSet[0]].getProxy().initCommNCCL("Generate comm group ID", 0, bytes(128), list(commGroupSet))
threadList = []
def requestInitCommBackend(proxy):
# print(proxy.initCommBackend())
if c10dBackend == "grpc":
print(proxy.initCommGRPC(rankToIpMap))
if c10dBackend == "nccl":
proxy.initCommNCCL("Join comm group", 1, group_id, list(commGroupSet))
for i in commGroupSet:
location = self.locations[i]
thread = threading.Thread(name='init_comm%d'%i, target=requestInitCommBackend, args=(location.getProxy(),))
thread.start()
threadList.append(thread)
waitthreads(threadList)
def initCommGroupsAll(self, jobName: str, commGrpDict: dict, jobRankToGlobalRank: list):
""" A helper function that will ask all runtimes to create new c10d comm groups.
Used while scheduling a new training job. This method should be invoked before
scheduling a new training job to any runtime that will participate in training.
"""
commGrpDictWithGlobalRanks = {}
for grpName in commGrpDict:
grpRanks = commGrpDict[grpName]
globalGrpRanks = [jobRankToGlobalRank[rank] for rank in grpRanks]
commGrpDictWithGlobalRanks[grpName] = globalGrpRanks
commGrpDictWithGlobalRanksInJson = json.dumps(commGrpDictWithGlobalRanks)
threadList = []
def requestInitCommGroups(proxy, jobName, commGroupsInJson):
# print(proxy.initCommGroups(jobName, commGroupsInJson))
proxy.initCommGroups(jobName, commGroupsInJson)
for i, location in enumerate(self.locations):
thread = threading.Thread(name='init_commGroups%d'%i, target=requestInitCommGroups,
args=(location.getProxy(), jobName, commGrpDictWithGlobalRanksInJson,))
thread.start()
threadList.append(thread)
waitthreads(threadList)
def waitForRuntimeAll(self):
""" Waits until all runtime processes terminate. Development use only. """
# TODO: replace this method with xmlrpc server event loop.
print("Waiting for ssh process to terminate.")
for p in self.processes:
p.wait()
####################################################################################
## Initial launch scripts
####################################################################################
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="ClusterCoordinator initial launch "
"script that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--addrToBind", type=str, default="localhost:12340",
help="IP:port to listen for requests to the cluster coordinator")
parser.add_argument("--c10dBackend", type=str, default="nccl",
help="pytorch c10d communication backend. Type either nccl or gloo")
parser.add_argument("--logLevel", type=int, default=1,
help="Logging level. 0: verbose, 1: Info, 2: Error") # NOT YET IMPLEMENTED.
parser.add_argument("--pathToConfig", type=str, default="clusterConfig.json",
help="The full path to the cluster configuration files")
parser.add_argument('--install', default=False, action='store_true',
help="When this option is set, it will install required pip packages to all servers")
parser.add_argument('--profile', default=False, action='store_true',
help="To launch runtimes with night system profiling.")
parser.add_argument("--be_batch_size", type=int, default=0,
help="launch runtimes with be beatch size")
parser.add_argument('--cpp', default=False, action='store_true',
help="To launch CPP version runtimes.")
parser.add_argument('--manualLaunch', default=False, action='store_true',
help="Do not runtimes automatically. Primarily for using gdb on runtime processes.")
parser.add_argument("--logdir", type=str, default="", help="Full path of log directory")
# For installing nsys.. (with other cuda toolkit..)
# wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
# sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
# sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
# sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ /"
# sudo apt-get update
# sudo apt-get -y install cuda
return parser.parse_known_args()
def main():
global args, extra_args
args, extra_args = parse_args()
# clusterConfig = json.load(open(args.pathToConfig))
global rankToIpMap
rankToIpMap = {}
commGrpRanksWorld = []
locations = []
# for serverConfig in clusterConfig["serverList"]:
# print("Found %s" % str(serverConfig))
port = 11250
for i in range(torch.cuda.device_count()):
rankToIpMap[str(len(locations))] = f"127.0.0.1:{port}"
commGrpRanksWorld.append(len(locations))
locations.append(Location("127.0.0.1", port, i, None, None, args.cpp))
port += 1
addrToBindCombo = re.split('[-:]', args.addrToBind)
addrToBind = addrToBindCombo[0]
portToBind = int(addrToBindCombo[1])
coordinator = ClusterCoordinator(addrToBind, portToBind, locations, os.getcwd(), args.be_batch_size)
if args.install:
coordinator.installPackages()
# Just make sure there's no previously left runtimes.
# CPP runtimes seem to terminate appropriately. So, there's no need to shutdown leftovers.
if not args.cpp:
print("Cleaning up potentially leftover runtime servers from previous experiment.")
coordinator.shutdownRuntimeAll()
time.sleep(10)
coordinator.launchRuntimeAll(args.c10dBackend, profile=args.profile, cppRuntime=args.cpp, manualLaunch=args.manualLaunch)
print("All runtime nodes are up and running. Now, initializing communication backend..")
coordinator.initCommBackendAll(args.c10dBackend, commGrpRanksWorld)
print("Communication backends are ready at all locations.")
print("Now, cluster is ready to accept training jobs.")
sys.stdout.flush()
coordinator.timeout = 1
while not HAS_EXCEPTION:
coordinator.handle_request()
time.sleep(5)
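# Illustrative sketch (not part of the original coordinator): how communication
# groups fall out of a module description, mirroring the logic of
# ClusterCoordinator.buildNeededCommGroups above. The layer spec below is made up
# for the example; only layers assigned to more than one GPU need a comm group.
def _example_comm_groups():
    exampleSpec = {
        "layers": [
            {"name": "conv1", "gpuAssignment": [0]},           # single GPU -> no group
            {"name": "conv2", "gpuAssignment": [0, 1]},        # needs group (0, 1)
            {"name": "fc",    "gpuAssignment": [2, 0, 1, 3]},  # needs group (0, 1, 2, 3)
        ]
    }
    groups = set()
    for layer in exampleSpec["layers"]:
        activeset = tuple(sorted(layer["gpuAssignment"]))
        if len(activeset) > 1:
            groups.add(activeset)
    return sorted(groups)  # [(0, 1), (0, 1, 2, 3)]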
if __name__ == "__main__":
main()
|
ayylmao.py
|
from Tkinter import *
from ScrolledText import *
import socket, select, sys, time, json, threading
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
unpad = lambda s : s[0:-ord(s[-1])]
class Server:
def __init__(self, app):
self.app = app
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind(('localhost', 32667))
self.server.listen(10)
self.inputs = [self.server]
self.count = 1000
self.t = threading.Thread(target=self.startThread)
self.t.daemon = True
self.t.start()
self.unpad = lambda s : s[0:-ord(s[-1])]
def startThread(self):
while 1:
read, write, error = select.select(self.inputs, [], [])
for s in read:
if s == self.server:
cs, addr = self.server.accept()
self.inputs.append(cs)
else:
data = s.recv(4096)
if data:
send = dict()
try:
data = json.loads(data)
except ValueError:
                            pass  # data = json.loads(unpad(data))
self.app.log(data)
if data['type'] == 'shutdown':
self.inputs.remove(s)
self.app.log('User "%s" has disconnected.' % (data['name']))
send['que'] = 'S'
send['name'] = 'SERVER'
send['msg'] = '%s has disconnected from the server' % (data['name'])
for i in self.inputs:
if i != s and i != self.server:
i.send(json.dumps(send))
elif data['type'] == 'hello':
s.send(str(self.count))
self.app.log('Assigned name "anon%s" to new user.' % (str(self.count)))
self.count += 1
elif data['type'] == 'login':
self.app.log('User logged in as "%s".' % (data['name']))
send['que'] = 'S'
send['name'] = 'SERVER'
send['msg'] = 'Welcome to the server. There are currently %i users online.' % (len(self.inputs) - 2)
s.send(json.dumps(send))
send = dict()
send['que'] = 'S'
send['name'] = 'SERVER'
send['msg'] = '%s has connected to the server' % (data['name'])
for i in self.inputs:
if i != s and i != self.server:
i.send(json.dumps(send))
elif data['type'] == 'namechange':
send['que'] = 'S'
send['name'] = 'SERVER'
send['msg'] = '%s changed their name to %s.' % (data['msg'], data['name'])
self.app.log('User "%s" has changed their name to "%s".' % (data['msg'], data['name']))
for i in self.inputs:
if i != s and i != self.server:
i.send(json.dumps(send))
else:
self.app.log('User "%s" sent a message: "%s".' % (data['name'], data['msg']))
send['que'] = 'U'
send['name'] = data['name']
send['msg'] = data['msg']
for i in self.inputs:
if i != s and i != self.server:
i.send(json.dumps(send))
def shutdown(self):
for i in self.inputs:
if i != self.server:
i.send('{"que": "S", "name": "SERVER", "msg": "Server is shutting down!"}')
class Application(Frame):
def say(self, who, what):
self.chatbox['state'] = "normal"
        what = str(what).replace('\n', '')
        who = str(who).replace('\n', '')
self.chatbox.insert('end', "[" + str(who) + "]: " + str(what) + "\n")
self.chatbox.see('end')
self.chatbox['state'] = "disabled"
def log(self, what):
self.chatbox['state'] = "normal"
        what = str(what).replace('\n', '')
self.chatbox.insert('end', str(what) + "\n")
self.chatbox.see('end')
self.chatbox['state'] = "disabled"
def createWidgets(self):
self.chatbox = ScrolledText(self)
self.chatbox['width'] = 50
self.chatbox['height'] = 24
self.chatbox['padx'] = 5
self.chatbox['pady'] = 5
self.chatbox['relief'] = 'flat'
self.chatbox['state'] = "disabled"
self.chatbox['font'] = ("Comic Sans MS", 10, "") # Intentionally coded Comic Sans
self.grid(row=0, column=1, padx=5, pady=5)
self.chatbox.pack()
self.separator = Frame(self)
self.separator['height'] = 10
self.separator.pack()
self.sendButton = Button(self)
self.sendButton["text"] = "Quit",
self.sendButton["command"] = self.quit
self.sendButton.pack()
def __init__(self, master):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
self.root = master
self.root.title('Chat Server')
self.s = Server(self)
root = Tk()
app = Application(root)
app.mainloop()
app.s.shutdown()
root.destroy()
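# Minimal client sketch for the JSON protocol handled by Server.startThread above.
# This is an illustration, not part of the original script: the 'message' type name
# is an assumption (the server treats any type other than shutdown/hello/login/
# namechange as a chat message), and the short sleeps just keep each send in its
# own recv() on the server side.
def example_client(host='localhost', port=32667):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.send(json.dumps({'type': 'hello'}))       # server replies with a number
    name = 'anon' + client.recv(64).strip()          # e.g. "anon1000"
    time.sleep(0.1)
    client.send(json.dumps({'type': 'login', 'name': name}))
    client.recv(4096)                                # welcome message from SERVER
    time.sleep(0.1)
    client.send(json.dumps({'type': 'message', 'name': name, 'msg': 'hello everyone'}))
    time.sleep(0.1)
    client.send(json.dumps({'type': 'shutdown', 'name': name}))
    client.close()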
|
asyncimagesender.py
|
from imagezmq.imagezmq import ImageSender
from queue import Queue
import time
import threading
from logging import getLogger
"""
Class used to simplify the sending of images in an asychronous fashion.
See tests/test_send_async_images.py for an example usage
See test/test_mac_receive_images_montage.py for an example of how to setup a montage window for
each of the test ImageSenders
NOTE: the tests ( but not this class ) assume you have OpenCV and imutils installed.
"""
class AsyncImageSender(object):
def __init__(self, server_name="server", server_ip='127.0.0.1', port=5555, send_timeout=0, recv_timeout=0,
show_frame_rate=0, backlog=0):
self.server_name = server_name
self.server_ip = server_ip
self.port = port
self.send_timeout = send_timeout
self.recv_timeout = recv_timeout
self.frame_queue = Queue()
self.background_thread = None
self.show_frame_rate = show_frame_rate
self.sender = None
self.backlog = backlog
def _create_sender(self):
connect_to = f'tcp://{self.server_ip}:{self.port}'
sender = ImageSender(connect_to=connect_to, send_timeout=self.send_timeout, recv_timeout=self.recv_timeout)
return sender
def _send_immediate(self, frame):
start = time.time()
frame_count = 0
try:
if self.show_frame_rate > 0:
frame_count += 1
delta = time.time() - start
if delta > self.show_frame_rate:
print(f"Sending {(frame_count / delta)} frames/sec")
start = time.time()
frame_count = 0
try:
hub_reply = self.sender.send_image(self.server_name, frame)
except Exception as exc:
getLogger("AsyncImageSender").error("send_image exception")
getLogger("AsyncImageSender").error(f"Exception msg: {exc}")
print(exc)
time.sleep(6) # something happened, force a timeout
raise TimeoutError
except TimeoutError:
getLogger("AsyncImageSender").error("Sending timeout.. reconnect to server")
self.sender = self._create_sender()
def _send_frame_background_function(self):
self.sender = self._create_sender()
while True:
frame = self.frame_queue.get()
self._send_immediate(frame)
def run_in_background(self):
self.background_thread = threading.Thread(target=self._send_frame_background_function, args=())
self.background_thread.daemon = True
self.background_thread.start()
def send_frame_async(self, frame):
if self.backlog > 0 and self.frame_queue.qsize() > self.backlog:
return
self.frame_queue.put_nowait(frame)
return
def send_frame_immediate(self, frame):
if self.background_thread is not None:
raise Exception("Cannot send a frame immediately if there is a background thread running")
if self.sender is None:
self.sender = self._create_sender()
self._send_immediate(frame)
return
def queue_size(self):
return self.frame_queue.qsize()
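# Usage sketch (an illustration, not part of this module): stream synthetic frames
# through AsyncImageSender in background mode. Assumes numpy is available and an
# imagezmq ImageHub is listening at the given address; the address, frame size and
# rates below are made up for the example.
if __name__ == '__main__':
    import numpy as np

    sender = AsyncImageSender(server_name='camera-0', server_ip='127.0.0.1',
                              port=5555, show_frame_rate=5, backlog=30)
    sender.run_in_background()          # frames queued by send_frame_async are sent on a daemon thread
    for _ in range(100):
        frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a camera frame
        sender.send_frame_async(frame)  # dropped silently once the backlog is exceeded
        time.sleep(0.03)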
|
yt_gui_400x400.pyw
|
from file_path_adder import getpth
import internet
from tkinter import *
from tkinter import font
from tkinter.tix import Balloon,Tk
from PIL import Image, ImageTk
from io import BytesIO
from urllib.request import urlopen
from pywhatkit import playonyt
from threading import Thread
from webbrowser import open_new_tab as web
from vlc import State
from winsound import MessageBeep
import pafy
import vlc
play_btn_path = getpth("icons","play.png")
pause_btn_path = getpth("icons","pause.png")
forward_btn_path = getpth("icons","forward.png")
backward_btn_path = getpth("icons","backward.png")
class Stream:
song_title = ""
thumbnail = ""
thumbnail_url = ""
stream_url = ""
yt_url = ""
Instance = vlc.Instance()
player = Instance.media_player_new()
status = ""
def __init__(self,song_title,status):
self.song_title = song_title
self.status = status
self.show_status("fetching url")
self.yt_url = playonyt(self.song_title,open_video=False)
video_data = pafy.new(self.yt_url)
self.thumbnail_url = video_data.getbestthumb()
audio = video_data.getbestaudio()
self.stream_url = audio.url
media = self.Instance.media_new(self.stream_url)
# media.get_mrl()
self.player.set_media(media)
def get_thumbnail(self):
url_request = urlopen(self.thumbnail_url).read()
img = Image.open(BytesIO(url_request)).resize((320,180),Image.ANTIALIAS)
self.thumbnail = ImageTk.PhotoImage(img)
return self.thumbnail
def is_playing(self):
return self.player.is_playing()
def show_status(self,msg):
self.status.set(msg.title())
def backward(self):
current_time = self.player.get_time()
current_time -= 5000
self.player.set_time(current_time)
def forward(self):
current_time = self.player.get_time()
current_time += 5000
self.player.set_time(current_time)
def play_on_yt(self,event):
self.pause()
web(current_stream.yt_url)
def get_state(self):
return self.player.get_state()
def play(self):
global playRpauseBtn
self.show_status("playing")
playRpauseBtn.configure(image=pause_btn_image)
self.player.play()
def pause(self):
global playRpauseBtn
if self.is_playing():
self.player.pause()
self.show_status("Paused")
playRpauseBtn.configure(image=play_btn_image)
def stop(self):
print("Thread stopped..")
self.player.stop()
def playRpause():
global current_stream
current_song = song.get().strip()
if current_song =="":
pass
elif current_stream.song_title == current_song:
if current_stream.is_playing():
current_stream.pause()
else:
current_stream.play()
else:
current_stream.stop()
current_stream = Stream(song_title=current_song,status=curr_status)
current_stream.play()
img = current_stream.get_thumbnail()
thumbnail_img.configure(image = img)
thumbnail_img.image= img
def action():
Thread(target=playRpause).start()
def onclick(event):
song.delete(0,END)
show_status("Enter song or video title")
playRpauseBtn.configure(image=play_btn_image)
def show_status(msg):
curr_status.set(msg.title())
def is_played(root):
    if current_stream.get_state() == State.Ended:
        MessageBeep(16)  # beep once when playback finishes
        show_status("Enter song or video title")
        current_stream.song_title = ""
        playRpauseBtn.configure(image=play_btn_image)
    root.after(100,is_played,root)
root = Tk()
root.geometry("400x400")
root.wm_resizable(False,False)
root.wm_title("yT Player")
balloon = Balloon(root)
curr_status = StringVar()
status = Label(textvariable=curr_status,font=("",10))
current_stream = Stream(song_title="Savatha pulla",status=curr_status)
show_status("default song is set")
default_logo = current_stream.get_thumbnail()
thumbnail_img = Label(image=default_logo,cursor="hand2")
thumbnail_img.place(anchor="n",rely=0.05,relx=0.5)
thumbnail_img.bind("<Button>",func=current_stream.play_on_yt)
song = Entry(font=("",15),justify="center")
song.insert(INSERT,current_stream.song_title)
song.place(relx=0.5,rely=0.58,anchor="n")
song.bind("<Button>",func=onclick)
status.place(relx=0.5,rely=0.7,anchor="n")
play_btn_image = ImageTk.PhotoImage(Image.open(play_btn_path).resize((40,40),Image.ANTIALIAS))
pause_btn_image = ImageTk.PhotoImage(Image.open(pause_btn_path).resize((40,40),Image.ANTIALIAS))
forward_btn_image = ImageTk.PhotoImage(Image.open(forward_btn_path).resize((40,40),Image.ANTIALIAS))
backward_btn_image = ImageTk.PhotoImage(Image.open(backward_btn_path).resize((40,40),Image.ANTIALIAS))
backward_btn_left = Button(image=backward_btn_image,command=current_stream.backward,cursor="hand2")
backward_btn_left.place(relx=0.25,rely=0.8,anchor="n")
playRpauseBtn = Button(image = play_btn_image,command=action,font=("",15,font.BOLD),cursor="hand2")
playRpauseBtn.place(relx=0.5,rely=0.8,anchor="n")
forward_btn_right = Button(image=forward_btn_image,command = current_stream.forward,cursor="hand2")
forward_btn_right.place(relx=0.75,rely=0.8,anchor="n")
balloon.bind_widget(thumbnail_img,balloonmsg="click to open the video in youtube")
balloon.bind_widget(forward_btn_right,balloonmsg="+5 sec")
balloon.bind_widget(backward_btn_left,balloonmsg="-5 sec")
balloon.bind_widget(playRpauseBtn,balloonmsg="play/pause")
root.after(100,is_played,root)
root.mainloop()
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
import collections
import copy
import logging
import Queue as queue
import threading
import time
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import metrics
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import registry
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.internal import pickler
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
# This module is experimental. No backwards-compatibility guarantees.
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
_DONE = object()
def __init__(self):
self._push_queue = queue.Queue()
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._started = False
self._uid_counter = 0
def Control(self, iterator, context):
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
self._started = True
while True:
to_push = self._push_queue.get()
if to_push is self._DONE:
return
yield to_push
def _read(self):
for data in self._inputs:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, item):
if item is self._DONE:
future = None
else:
if not item.instruction_id:
self._uid_counter += 1
item.instruction_id = 'control_%s' % self._uid_counter
future = ControlFuture(item.instruction_id)
self._futures_by_id[item.instruction_id] = future
self._push_queue.put(item)
return future
def done(self):
self.push(self._DONE)
# Can't join a thread before it's started.
while not self._started:
time.sleep(.01)
self._read_thread.join()
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def __iter__(self):
output_stream = create_OutputStream()
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [globally_window((key, values))]
else:
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for encoded_key, windowed_values in self._table.items():
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream, True)
return iter([output_stream.get()])
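# Illustrative sketch (not used by the runner): the plain-Python equivalent of what
# _GroupingBuffer does once coders and windowing are stripped away -- buffer
# (key, value) pairs, then emit one (key, values-list) per distinct key.
def _group_by_key_example(pairs):
  grouped = collections.defaultdict(list)
  for key, value in pairs:
    grouped[key].append(value)
  # e.g. [('a', 1), ('b', 2), ('a', 3)] yields ('a', [1, 3]) and ('b', [2])
  return list(grouped.items())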
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, side_input_data):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
assert side_input_data.access_pattern == urns.ITERABLE_ACCESS
self._windowed_value_coder = side_input_data.coder
self._window_coder = side_input_data.coder.window_coder
self._value_coder = side_input_data.coder.wrapped_value_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
for window in windowed_value.windows:
self._values_by_window[window].append(windowed_value.value)
def items(self):
value_coder_impl = self._value_coder.get_impl()
for window, values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(self, use_grpc=False, sdk_harness_factory=None):
"""Creates a new Fn API Runner.
Args:
use_grpc: whether to use grpc or simply make in-process calls
defaults to False
sdk_harness_factory: callable used to instantiate customized sdk harnesses
          typically not set by users
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._use_grpc = use_grpc
if sdk_harness_factory and not use_grpc:
raise ValueError('GRPC must be used if a harness factory is provided.')
self._sdk_harness_factory = sdk_harness_factory
self._progress_frequency = None
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline):
MetricsEnvironment.set_metrics_supported(False)
return self.run_via_runner_api(pipeline.to_runner_api())
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
def create_stages(self, pipeline_proto):
# First define a couple of helpers.
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(self, name, transforms,
downstream_side_inputs=None, must_follow=frozenset()):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
downstream_side_inputs = ', '.join(
str(si) for si in self.downstream_side_inputs)
return "%s\n %s\n must follow: %s\n downstream_side_inputs: %s" % (
self.name,
'\n'.join(["%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms]),
must_follow,
downstream_side_inputs)
def can_fuse(self, consumer):
def no_overlap(a, b):
return not a.intersection(b)
return (
not self in consumer.must_follow
and not self.is_flatten() and not consumer.is_flatten()
and no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other):
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow))
def is_flatten(self):
return any(transform.spec.urn == urns.FLATTEN_TRANSFORM
for transform in self.transforms)
def side_inputs(self):
for transform in self.transforms:
if transform.spec.urn == urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn == urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
def deduplicate_read(self):
seen_pcolls = set()
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(transform.outputs.items())[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
# Now define the "optimization" phases.
safe_coders = {}
def lift_combiners(stages):
"""Expands CombinePerKey into pre- and post-grouping stages.
... -> CombinePerKey -> ...
becomes
... -> PreCombine -> GBK -> MergeAccumulators -> ExtractOutput -> ...
"""
def add_or_get_coder_id(coder_proto):
for coder_id, coder in pipeline_components.coders.items():
if coder == coder_proto:
return coder_id
new_coder_id = unique_name(pipeline_components.coders, 'coder')
pipeline_components.coders[new_coder_id].CopyFrom(coder_proto)
return new_coder_id
def windowed_coder_id(coder_id):
proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.WINDOWED_VALUE_CODER)),
component_coder_ids=[coder_id, window_coder_id])
return add_or_get_coder_id(proto)
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == urns.COMBINE_PER_KEY_TRANSFORM:
combine_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
input_pcoll = pipeline_components.pcollections[only_element(
transform.inputs.values())]
output_pcoll = pipeline_components.pcollections[only_element(
transform.outputs.values())]
windowed_input_coder = pipeline_components.coders[
input_pcoll.coder_id]
element_coder_id, window_coder_id = (
windowed_input_coder.component_coder_ids)
element_coder = pipeline_components.coders[element_coder_id]
key_coder_id, _ = element_coder.component_coder_ids
accumulator_coder_id = combine_payload.accumulator_coder_id
key_accumulator_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.KV_CODER)),
component_coder_ids=[key_coder_id, accumulator_coder_id])
key_accumulator_coder_id = add_or_get_coder_id(key_accumulator_coder)
accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.ITERABLE_CODER)),
component_coder_ids=[accumulator_coder_id])
accumulator_iter_coder_id = add_or_get_coder_id(
accumulator_iter_coder)
key_accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.KV_CODER)),
component_coder_ids=[key_coder_id, accumulator_iter_coder_id])
key_accumulator_iter_coder_id = add_or_get_coder_id(
key_accumulator_iter_coder)
precombined_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[precombined_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Precombine.out',
coder_id=windowed_coder_id(key_accumulator_coder_id),
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
grouped_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[grouped_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Group.out',
coder_id=windowed_coder_id(key_accumulator_iter_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
merged_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[merged_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Merge.out',
coder_id=windowed_coder_id(key_accumulator_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
def make_stage(base_stage, transform):
return Stage(
transform.unique_name,
[transform],
downstream_side_inputs=base_stage.downstream_side_inputs,
must_follow=base_stage.must_follow)
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Precombine',
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.PRECOMBINE_TRANSFORM,
payload=transform.spec.payload),
inputs=transform.inputs,
outputs={'out': precombined_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Group',
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.GROUP_BY_KEY_TRANSFORM),
inputs={'in': precombined_pcoll_id},
outputs={'out': grouped_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Merge',
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.MERGE_ACCUMULATORS_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': grouped_pcoll_id},
outputs={'out': merged_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/ExtractOutputs',
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.EXTRACT_OUTPUTS_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': merged_pcoll_id},
outputs=transform.outputs))
else:
yield stage
def expand_gbk(stages):
"""Transforms each GBK into a write followed by a read.
"""
good_coder_urns = set(beam.coders.Coder._known_urns.keys()) - set([
urns.PICKLED_CODER])
coders = pipeline_components.coders
for coder_id, coder_proto in coders.items():
if coder_proto.spec.spec.urn == urns.BYTES_CODER:
bytes_coder_id = coder_id
break
else:
bytes_coder_id = unique_name(coders, 'bytes_coder')
pipeline_components.coders[bytes_coder_id].CopyFrom(
beam.coders.BytesCoder().to_runner_api(None))
coder_substitutions = {}
def wrap_unknown_coders(coder_id, with_bytes):
if (coder_id, with_bytes) not in coder_substitutions:
wrapped_coder_id = None
coder_proto = coders[coder_id]
if coder_proto.spec.spec.urn == urns.LENGTH_PREFIX_CODER:
coder_substitutions[coder_id, with_bytes] = (
bytes_coder_id if with_bytes else coder_id)
elif coder_proto.spec.spec.urn in good_coder_urns:
wrapped_components = [wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
if wrapped_components == list(coder_proto.component_coder_ids):
# Use as is.
coder_substitutions[coder_id, with_bytes] = coder_id
else:
wrapped_coder_id = unique_name(
coders,
coder_id + ("_bytes" if with_bytes else "_len_prefix"))
coders[wrapped_coder_id].CopyFrom(coder_proto)
coders[wrapped_coder_id].component_coder_ids[:] = [
wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
else:
# Not a known coder.
if with_bytes:
coder_substitutions[coder_id, with_bytes] = bytes_coder_id
else:
wrapped_coder_id = unique_name(coders, coder_id + "_len_prefix")
len_prefix_coder_proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.LENGTH_PREFIX_CODER)),
component_coder_ids=[coder_id])
coders[wrapped_coder_id].CopyFrom(len_prefix_coder_proto)
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
# This operation is idempotent.
if wrapped_coder_id:
coder_substitutions[wrapped_coder_id, with_bytes] = wrapped_coder_id
return coder_substitutions[coder_id, with_bytes]
def fix_pcoll_coder(pcoll):
new_coder_id = wrap_unknown_coders(pcoll.coder_id, False)
safe_coders[new_coder_id] = wrap_unknown_coders(pcoll.coder_id, True)
pcoll.coder_id = new_coder_id
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == urns.GROUP_BY_KEY_TRANSFORM:
for pcoll_id in transform.inputs.values():
fix_pcoll_coder(pipeline_components.pcollections[pcoll_id])
for pcoll_id in transform.outputs.values():
fix_pcoll_coder(pipeline_components.pcollections[pcoll_id])
# This is used later to correlate the read and write.
param = str("group:%s" % stage.name)
if stage.name not in pipeline_components.transforms:
pipeline_components.transforms[stage.name].CopyFrom(transform)
gbk_write = Stage(
transform.unique_name + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write',
inputs=transform.inputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
yield gbk_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset([gbk_write]), stage.must_follow))
else:
yield stage
def sink_flattens(stages):
"""Sink flattens and remove them from the graph.
A flatten that cannot be sunk/fused away becomes multiple writes (to the
same logical sink) followed by a read.
"""
# TODO(robertwb): Actually attempt to sink rather than always materialize.
# TODO(robertwb): Possibly fuse this into one of the stages.
pcollections = pipeline_components.pcollections
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == urns.FLATTEN_TRANSFORM:
# This is used later to correlate the read and writes.
param = str("materialize:%s" % transform.unique_name)
output_pcoll_id, = transform.outputs.values()
output_coder_id = pcollections[output_pcoll_id].coder_id
flatten_writes = []
for local_in, pcoll_in in transform.inputs.items():
if pcollections[pcoll_in].coder_id != output_coder_id:
# Flatten inputs must all be written with the same coder as is
# used to read them.
pcollections[pcoll_in].coder_id = output_coder_id
transcoded_pcollection = (
transform.unique_name + '/Transcode/' + local_in + '/out')
yield Stage(
transform.unique_name + '/Transcode/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=
transform.unique_name + '/Transcode/' + local_in,
inputs={local_in: pcoll_in},
outputs={'out': transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.IDENTITY_DOFN_URN))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
pcollections[transcoded_pcollection].CopyFrom(
pcollections[pcoll_in])
pcollections[transcoded_pcollection].coder_id = output_coder_id
else:
transcoded_pcollection = pcoll_in
flatten_write = Stage(
transform.unique_name + '/Write/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write/' + local_in,
inputs={local_in: transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
flatten_writes.append(flatten_write)
yield flatten_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset(flatten_writes), stage.must_follow))
else:
yield stage
def annotate_downstream_side_inputs(stages):
"""Annotate each stage with fusion-prohibiting information.
Each stage is annotated with the (transitive) set of pcollections that
depend on this stage that are also used later in the pipeline as a
side input.
While theoretically this could result in O(n^2) annotations, the size of
each set is bounded by the number of side inputs (typically much smaller
than the number of total nodes) and the number of *distinct* side-input
sets is also generally small (and shared due to the use of union
defined above).
This representation is also amenable to simple recomputation on fusion.
"""
consumers = collections.defaultdict(list)
all_side_inputs = set()
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
all_side_inputs = frozenset(all_side_inputs)
downstream_side_inputs_by_stage = {}
def compute_downstream_side_inputs(stage):
if stage not in downstream_side_inputs_by_stage:
downstream_side_inputs = frozenset()
for transform in stage.transforms:
for output in transform.outputs.values():
if output in all_side_inputs:
downstream_side_inputs = union(
downstream_side_inputs, frozenset([output]))
for consumer in consumers[output]:
downstream_side_inputs = union(
downstream_side_inputs,
compute_downstream_side_inputs(consumer))
downstream_side_inputs_by_stage[stage] = downstream_side_inputs
return downstream_side_inputs_by_stage[stage]
for stage in stages:
stage.downstream_side_inputs = compute_downstream_side_inputs(stage)
return stages
def greedily_fuse(stages):
"""Places transforms sharing an edge in the same stage, whenever possible.
"""
producers_by_pcoll = {}
consumers_by_pcoll = collections.defaultdict(list)
# Used to always reference the correct stage as the producer and
# consumer maps are not updated when stages are fused away.
replacements = {}
def replacement(s):
old_ss = []
while s in replacements:
old_ss.append(s)
s = replacements[s]
for old_s in old_ss[:-1]:
replacements[old_s] = s
return s
def fuse(producer, consumer):
fused = producer.fuse(consumer)
replacements[producer] = fused
replacements[consumer] = fused
# First record the producers and consumers of each PCollection.
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers_by_pcoll[input].append(stage)
for output in transform.outputs.values():
producers_by_pcoll[output] = stage
logging.debug('consumers\n%s', consumers_by_pcoll)
logging.debug('producers\n%s', producers_by_pcoll)
# Now try to fuse away all pcollections.
for pcoll, producer in producers_by_pcoll.items():
pcoll_as_param = str("materialize:%s" % pcoll)
write_pcoll = None
for consumer in consumers_by_pcoll[pcoll]:
producer = replacement(producer)
consumer = replacement(consumer)
# Update consumer.must_follow set, as it's used in can_fuse.
consumer.must_follow = frozenset(
replacement(s) for s in consumer.must_follow)
if producer.can_fuse(consumer):
fuse(producer, consumer)
else:
# If we can't fuse, do a read + write.
if write_pcoll is None:
write_pcoll = Stage(
pcoll + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Write',
inputs={'in': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=pcoll_as_param))])
fuse(producer, write_pcoll)
if consumer.has_as_main_input(pcoll):
read_pcoll = Stage(
pcoll + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Read',
outputs={'out': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=pcoll_as_param))],
must_follow=frozenset([write_pcoll]))
fuse(read_pcoll, consumer)
else:
consumer.must_follow = union(
consumer.must_follow, frozenset([write_pcoll]))
# Everything that was originally a stage or a replacement, but wasn't
# replaced, should be in the final graph.
final_stages = frozenset(stages).union(replacements.values()).difference(
replacements.keys())
for stage in final_stages:
# Update all references to their final values before throwing
# the replacement data away.
stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)
# Two reads of the same stage may have been fused. This is unneeded.
stage.deduplicate_read()
return final_stages
def sort_stages(stages):
"""Order stages suitable for sequential execution.
"""
seen = set()
ordered = []
def process(stage):
if stage not in seen:
seen.add(stage)
for prev in stage.must_follow:
process(prev)
ordered.append(stage)
for stage in stages:
process(stage)
return ordered
# Now actually apply the operations.
pipeline_components = copy.deepcopy(pipeline_proto.components)
# Reify coders.
# TODO(BEAM-2717): Remove once Coders are already in proto.
coders = pipeline_context.PipelineContext(pipeline_components).coders
for pcoll in pipeline_components.pcollections.values():
if pcoll.coder_id not in coders:
window_coder = coders[
pipeline_components.windowing_strategies[
pcoll.windowing_strategy_id].window_coder_id]
coder = WindowedValueCoder(
registry.get_coder(pickler.loads(pcoll.coder_id)),
window_coder=window_coder)
pcoll.coder_id = coders.get_id(coder)
coders.populate_map(pipeline_components.coders)
known_composites = set(
[urns.GROUP_BY_KEY_TRANSFORM, urns.COMBINE_PER_KEY_TRANSFORM])
def leaf_transforms(root_ids):
for root_id in root_ids:
root = pipeline_proto.components.transforms[root_id]
if root.spec.urn in known_composites:
yield root_id
elif not root.subtransforms:
# Make sure its outputs are not a subset of its inputs.
if set(root.outputs.values()) - set(root.inputs.values()):
yield root_id
else:
for leaf in leaf_transforms(root.subtransforms):
yield leaf
# Initial set of stages are singleton leaf transforms.
stages = [
Stage(name, [pipeline_proto.components.transforms[name]])
for name in leaf_transforms(pipeline_proto.root_transform_ids)]
# Apply each phase in order.
for phase in [
annotate_downstream_side_inputs, lift_combiners, expand_gbk,
sink_flattens, greedily_fuse, sort_stages]:
logging.info('%s %s %s', '=' * 20, phase, '=' * 20)
stages = list(phase(stages))
logging.debug('Stages: %s', [str(s) for s in stages])
# Return the (possibly mutated) context and ordered set of stages.
return pipeline_components, stages, safe_coders
def run_stages(self, pipeline_components, stages, safe_coders):
if self._use_grpc:
controller = FnApiRunner.GrpcController(self._sdk_harness_factory)
else:
controller = FnApiRunner.DirectController()
metrics_by_stage = {}
try:
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
metrics_by_stage[stage.name] = self.run_stage(
controller, pipeline_components, stage,
pcoll_buffers, safe_coders).process_bundle.metrics
finally:
controller.close()
return RunnerResult(runner.PipelineState.DONE, metrics_by_stage)
def run_stage(
self, controller, pipeline_components, stage, pcoll_buffers, safe_coders):
context = pipeline_context.PipelineContext(pipeline_components)
data_operation_spec = controller.data_operation_spec()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
      # Also mutates IO stages to point to the data_operation_spec.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
data_input[target] = pcoll_buffers[pcoll_id]
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
else:
raise NotImplementedError
if data_operation_spec:
transform.spec.payload = data_operation_spec.SerializeToString()
else:
transform.spec.payload = ""
elif transform.spec.urn == urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
'materialize:' + transform.inputs[tag],
beam.pvalue.SideInputData.from_runner_api(si, None))
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
# Store the required side inputs into state.
for (transform_id, tag), (pcoll_id, si) in data_side_input.items():
elements_by_window = _WindowGroupingBuffer(si)
for element_data in pcoll_buffers[pcoll_id]:
elements_by_window.append(element_data)
for window, elements_data in elements_by_window.items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window))
controller.state_handler.blocking_append(state_key, elements_data, None)
def get_buffer(pcoll_id):
if pcoll_id.startswith('materialize:'):
if pcoll_id not in pcoll_buffers:
# Just store the data chunks for replay.
pcoll_buffers[pcoll_id] = list()
elif pcoll_id.startswith('group:'):
# This is a grouping write, create a grouping buffer if needed.
if pcoll_id not in pcoll_buffers:
original_gbk_transform = pcoll_id.split(':', 1)[1]
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(transform_proto.inputs.values())
output_pcoll = only_element(transform_proto.outputs.values())
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[pcoll_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(pcoll_id)
return pcoll_buffers[pcoll_id]
return BundleManager(
controller, get_buffer, process_bundle_descriptor,
self._progress_frequency).process_bundle(data_input, data_output)
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
def blocking_get(self, state_key, instruction_reference=None):
with self._lock:
return ''.join(self._state[self._to_key(state_key)])
def blocking_append(self, state_key, data, instruction_reference=None):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key, instruction_reference=None):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(
StateServicer, beam_fn_api_pb2_grpc.BeamFnStateServicer):
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
if request.get:
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=self.blocking_get(request.state_key)))
elif request.append:
self.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.AppendResponse())
elif request.clear:
self.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.ClearResponse())
class DirectController(object):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self):
self.state_handler = FnApiRunner.StateServicer()
self.control_handler = self
self.data_plane_handler = data_plane.InMemoryDataChannel()
self.worker = sdk_worker.SdkWorker(
self.state_handler, data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()), {})
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
logging.debug('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.debug('CONTROL RESPONSE %s', response)
return ControlFuture(request.instruction_id, response)
def done(self):
pass
def close(self):
pass
def data_operation_spec(self):
return None
class GrpcController(object):
"""An grpc based controller for fn API control, state and data planes."""
def __init__(self, sdk_harness_factory=None):
self.sdk_harness_factory = sdk_harness_factory
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
# TODO(robertwb): Is sharing the control channel fine? Alternatively,
# how should this be plumbed?
self.state_handler = FnApiRunner.GrpcStateServicer()
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
self.state_handler, self.control_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
self.data_server.start()
self.control_server.start()
self.worker = self.sdk_harness_factory(
'localhost:%s' % self.control_port
) if self.sdk_harness_factory else sdk_worker.SdkHarness(
'localhost:%s' % self.control_port, worker_count=1)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
logging.info('starting worker')
self.worker_thread.start()
def data_operation_spec(self):
url = 'localhost:%s' % self.data_port
remote_grpc_port = beam_fn_api_pb2.RemoteGrpcPort()
remote_grpc_port.api_service_descriptor.url = url
return remote_grpc_port
def close(self):
self.control_handler.done()
self.worker_thread.join()
self.data_plane_handler.close()
self.control_server.stop(5).wait()
self.data_server.stop(5).wait()
class BundleManager(object):
_uid_counter = 0
def __init__(
self, controller, get_buffer, bundle_descriptor, progress_frequency=None):
self._controller = controller
self._get_buffer = get_buffer
self._bundle_descriptor = bundle_descriptor
self._registered = False
self._progress_frequency = progress_frequency
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
# Register the bundle descriptor, if needed.
if not self._registered:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
self._controller.control_handler.push(process_bundle_registration)
self._registered = True
# Write all the input data to the channel.
for (transform_id, name), elements in inputs.items():
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in elements:
data_out.write(element_data)
data_out.close()
# Actually start the bundle.
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._controller.control_handler.push(process_bundle)
with ProgressRequester(
self._controller, process_bundle_id, self._progress_frequency):
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in expected_outputs.items()]
logging.debug('Gather all output data from %s.', expected_targets)
for output in self._controller.data_plane_handler.input_elements(
process_bundle_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in expected_outputs:
self._get_buffer(expected_outputs[target_tuple]).append(output.data)
logging.debug('Wait for the bundle to finish.')
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
return result
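# ProgressRequester (below) is a daemon thread that polls the SDK harness for bundle
# progress every self._frequency seconds while a bundle runs; process_bundle above uses
# it as a context manager so polling starts and stops with the bundle.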
class ProgressRequester(threading.Thread):
def __init__(self, controller, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._controller = controller
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._controller.control_handler.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
      except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
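# ControlFuture (below) is a minimal future: get() blocks on a condition variable until
# set() delivers the control response (or the optional timeout expires).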
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_metrics):
self._counters = {}
self._distributions = {}
for step_metric in step_metrics.values():
for proto in step_metric.user:
key = metrics.execution.MetricKey.from_runner_api(proto.key)
if proto.HasField('counter_data'):
self._counters[key] = proto.counter_data.value
elif proto.HasField('distribution_data'):
self._distributions[
key] = metrics.cells.DistributionData.from_runner_api(
proto.distribution_data)
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
return {'counters': counters,
'distributions': distributions}
class RunnerResult(runner.PipelineResult):
def __init__(self, state, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._metrics_by_stage = metrics_by_stage
self._user_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
if self._user_metrics is None:
self._user_metrics = FnApiMetrics(self._metrics_by_stage)
return self._user_metrics
def only_element(iterable):
element, = iterable
return element
def unique_name(existing, prefix):
if prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
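# Illustrative use of the helpers above (values are assumptions):
#   only_element([42]) returns 42 and raises if the iterable has more or fewer elements;
#   unique_name({'a', 'a_1'}, 'a') returns 'a_2'.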
|
serve.py
|
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading; to maintain compatibility we've internalized some of that
# code here, stripping out unneeded functionality.
# All top level imports from each package moved here and organized
from __future__ import print_function
import ConfigParser
import atexit
import errno
import optparse
import os
import re
import subprocess
import sys
import textwrap
import threading
import time
import logging
from logging.config import fileConfig
from loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
try:
_ = optparse._
except AttributeError:
from gettext import gettext as _
class BoolOptionParser(optparse.OptionParser):
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
value = rargs[0].lower().strip()
del rargs[0:1]
if value in ('true', 'yes', 'on', '1', 'y', 't'):
value = None
elif value in ('false', 'no', 'off', '0', 'n', 'f'):
# Don't process
return
else:
self.error(_('%s option takes a boolean value only (true/false)') % opt)
else:
value = None
option.process(opt, value, values, self)
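# Illustrative behaviour of BoolOptionParser (option names are assumptions): for a
# store_true option, '--daemon' and '--daemon=true' both set the flag, while
# '--daemon=false' leaves it unset; any other value is rejected with an error.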
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
def __init__(self, message, exit_code=2):
self.message = message
self.exit_code = exit_code
Exception.__init__(self, message)
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation
in BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation
in BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6.
# To prevent DeprecationWarning from popping up over this
# pre-existing attribute, use a new property that takes lookup
# precedence.
message = property(_get_message, _set_message)
class NoDefault(object):
pass
# run and invoke methods moved below ServeCommand
class Command(object):
def __init__(self, name):
self.command_name = name
max_args = None
max_args_error = 'You must provide no more than %(max_args)s arguments'
min_args = None
min_args_error = 'You must provide at least %(min_args)s arguments'
required_args = None
# If this command takes a configuration file, set this to 1 or -1
# Then if invoked through #! the config file will be put into the positional
# arguments -- at the beginning with 1, at the end with -1
takes_config_file = None
# Grouped in help messages by this:
group_name = ''
required_args = ()
description = None
usage = ''
hidden = False
# This is the default verbosity level; --quiet subtracts,
# --verbose adds:
default_verbosity = 0
# This is the default interactive state:
default_interactive = 0
return_code = 0
BadCommand = BadCommand
# Must define:
# parser
# summary
# command()
def run(self, args):
self.parse_args(args)
# Setup defaults:
for name, default in [('verbose', 0),
('quiet', 0),
('interactive', False),
('overwrite', False)]:
if not hasattr(self.options, name):
setattr(self.options, name, default)
if getattr(self.options, 'simulate', False):
self.options.verbose = max(self.options.verbose, 1)
self.interactive = self.default_interactive
if getattr(self.options, 'interactive', False):
self.interactive += self.options.interactive
if getattr(self.options, 'no_interactive', False):
self.interactive = False
self.verbose = self.default_verbosity
self.verbose += self.options.verbose
self.verbose -= self.options.quiet
self.simulate = getattr(self.options, 'simulate', False)
# For #! situations:
if (os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None):
take = self.takes_config_file
filename = os.environ.get('PASTE_CONFIG_FILE')
if take == 1:
self.args.insert(0, filename)
elif take == -1:
self.args.append(filename)
else:
assert 0, (
"Value takes_config_file must be None, 1, or -1 (not %r)"
% take)
if (os.environ.get('PASTE_DEFAULT_QUIET')):
self.verbose = 0
# Validate:
if self.min_args is not None and len(self.args) < self.min_args:
raise BadCommand(
self.min_args_error % {'min_args': self.min_args,
'actual_args': len(self.args)})
if self.max_args is not None and len(self.args) > self.max_args:
raise BadCommand(
self.max_args_error % {'max_args': self.max_args,
'actual_args': len(self.args)})
for var_name, option_name in self.required_args:
if not getattr(self.options, var_name, None):
raise BadCommand(
'You must provide the option %s' % option_name)
result = self.command()
if result is None:
return self.return_code
else:
return result
def parse_args(self, args):
if self.usage:
usage = ' ' + self.usage
else:
usage = ''
self.parser.usage = "%%prog [options]%s\n%s" % (
usage, self.summary)
self.parser.prog = self._prog_name()
if self.description:
desc = self.description
desc = textwrap.dedent(desc)
self.parser.description = desc
self.options, self.args = self.parser.parse_args(args)
def _prog_name(self):
return '%s %s' % (os.path.basename(sys.argv[0]), self.command_name)
########################################
# Utility methods
########################################
def pad(self, s, length, dir='left'):
if len(s) >= length:
return s
if dir == 'left':
return s + ' ' * (length - len(s))
else:
return ' ' * (length - len(s)) + s
def standard_parser(cls, verbose=True,
interactive=False,
no_interactive=False,
simulate=False,
quiet=False,
overwrite=False):
"""
Create a standard ``OptionParser`` instance.
Typically used like::
class MyCommand(Command):
parser = Command.standard_parser()
Subclasses may redefine ``standard_parser``, so use the
nearest superclass's class method.
"""
parser = BoolOptionParser()
if verbose:
parser.add_option('-v', '--verbose',
action='count',
dest='verbose',
default=0)
if quiet:
parser.add_option('-q', '--quiet',
action='count',
dest='quiet',
default=0)
if no_interactive:
parser.add_option('--no-interactive',
action="count",
dest="no_interactive",
default=0)
if interactive:
parser.add_option('-i', '--interactive',
action='count',
dest='interactive',
default=0)
if simulate:
parser.add_option('-n', '--simulate',
action='store_true',
dest='simulate',
default=False)
if overwrite:
parser.add_option('-f', '--overwrite',
dest="overwrite",
action="store_true",
help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
return parser
standard_parser = classmethod(standard_parser)
def quote_first_command_arg(self, arg):
"""
There's a bug in Windows when running an executable that's
located inside a path with a space in it. This method handles
that case, or on non-Windows systems or an executable with no
spaces, it just leaves well enough alone.
"""
if (sys.platform != 'win32' or ' ' not in arg):
# Problem does not apply:
return arg
try:
import win32api
except ImportError:
raise ValueError(
"The executable %r contains a space, and in order to "
"handle this issue you must have the win32api module "
"installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
def parse_vars(self, args):
"""
Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
'b', 'c': 'd'}``
"""
result = {}
for arg in args:
if '=' not in arg:
raise BadCommand(
'Variable assignment %r invalid (no "=")'
% arg)
name, value = arg.split('=', 1)
result[name] = value
return result
def logging_file_config(self, config_file):
"""
Setup logging via the logging module's fileConfig function with the
specified ``config_file``, if applicable.
ConfigParser defaults are specified for the special ``__file__``
and ``here`` variables, similar to PasteDeploy config loading.
"""
parser = ConfigParser.ConfigParser()
parser.read([config_file])
if parser.has_section('loggers'):
config_file = os.path.abspath(config_file)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
class NotFoundCommand(Command):
def run(self, args):
print('Command %r not known (you may need to run setup.py egg_info)'
% self.command_name)
commands = list()
commands.sort()
if not commands:
print('No commands registered.')
print('Have you installed Paste Script?')
print('(try running python setup.py develop)')
return 2
print('Known commands:')
longest = max([len(n) for n, c in commands])
for name, command in commands:
print(' %s %s' % (self.pad(name, length=longest),
command.load().summary))
return 2
# ---- From paste.script.serve ----------------------------------------
MAXFD = 1024
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if (len(self.args) > 1 and self.args[1] in self.possible_subcommands):
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if (self.args and self.args[0] in self.possible_subcommands):
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False) and
getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print('Running reloading file monitor')
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
print("Could not stop daemon")
# It's ok to continue trying to restart if stop_daemon returns
# a 1, otherwise shortcut and return.
if cmd == 'restart' and result != 1:
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.verbose > 0:
print(str(ex))
return
if (self.options.monitor_restart and not
os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)
        app = loadapp(app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
print('Exiting%s (-v to see traceback)' % msg)
except AttributeError as e:
# Capturing bad error response from paste
if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'":
import socket
raise socket.error(98, 'Address already in use')
else:
raise AttributeError(e)
if jython_monitor:
            # JythonMonitor has to be run from the main thread
threading.Thread(target=serve).start()
print('Starting Jython file monitor')
jython_monitor.periodic_reload()
else:
serve()
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print('Entering daemon mode')
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print('Writing PID %s to %s' % (pid, pid_file))
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except (OSError, IOError) as e:
print("Could not delete: %s" % e)
return 2
return 1
for j in range(10):
if not live_pidfile(pid_file):
break
import signal
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print('PID %s in %s is not running' % (pid, pid_file))
return 1
print('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self):
        return self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
if self.verbose > 0:
if reloader:
print('Starting subprocess with file monitor')
else:
print('Starting subprocess with monitor parent')
while 1:
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
print('^C caught in monitor process')
if self.verbose > 1:
raise
return 1
finally:
if (proc is not None and
hasattr(os, 'kill')):
import signal
try:
os.kill(proc.pid, signal.SIGTERM)
except (OSError, IOError):
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.verbose > 0:
print('-' * 20, 'Restarting', '-' * 20)
def change_user_group(self, user, group):
if not user and not group:
return
import pwd
import grp
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
import grp
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print('Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid))
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
self.lock.acquire()
try:
if self.fileobj is None:
self.fileobj = open(self.filename, self.mode)
finally:
self.lock.release()
return self.fileobj
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
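# Illustrative use of LazyWriter, mirroring ServeCommand above: assigning
# sys.stdout = LazyWriter(log_file, 'a') defers opening the log file until the first write.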
def live_pidfile(pidfile):
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
os.kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
f = open(filename)
content = f.read()
f.close()
return int(content.strip())
except (ValueError, IOError):
return None
else:
return None
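# _remove_pid_file (below) is the atexit hook registered by record_pid(); it removes the
# PID file only when it still belongs to the process that wrote it.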
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print("PID file %s contains %s, not expected PID %s" % (
filename, pid_in_file, current_pid))
return
if verbosity > 0:
print("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
print("Cannot remove PID file: %s" % e)
# well, at least lets not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError as e:
            print('Stale PID left in file: %s (%s)' % (filename, e))
else:
print('Stale PID removed')
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
"""
This makes sure any open ports are closed.
Does this by connecting to them until they give connection
refused. Servers should call like::
import paste.script
ensure_port_cleanup([80, 443])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
# Wait for the server to bind to the port.
import socket
import errno
for bound_address in bound_addresses:
for attempt in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except socket.error as e:
if e.args[0] != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit():
"""
    Attempts to turn a SIGTERM signal into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------
python_version = sys.version.splitlines()[0].strip()
parser = optparse.OptionParser(add_help_option=False,
# version='%s from %s (python %s)'
# % (dist, dist.location, python_version),
usage='%prog [paster_options] COMMAND [command_options]')
parser.add_option(
'-h', '--help',
action='store_true',
dest='do_help',
help="Show this help message")
parser.disable_interspersed_args()
# @@: Add an option to run this in another Python interpreter
commands = {
'serve': ServeCommand
}
def run(args=None):
if (not args and
len(sys.argv) >= 2 and
os.environ.get('_') and sys.argv[0] != os.environ['_'] and
os.environ['_'] == sys.argv[1]):
# probably it's an exe execution
args = ['exe', os.environ['_']] + sys.argv[2:]
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
options.base_parser = parser
if options.do_help:
args = ['help'] + args
if not args:
print('Usage: %s COMMAND' % sys.argv[0])
args = ['help']
command_name = args[0]
if command_name not in commands:
command = NotFoundCommand
else:
command = commands[command_name]
invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
try:
runner = command(command_name)
exit_code = runner.run(args)
except BadCommand as e:
print(e.message)
exit_code = e.exit_code
sys.exit(exit_code)
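# Illustrative entry point (arguments are assumptions): run(['serve', 'galaxy.ini', '--daemon'])
# resolves 'serve' to ServeCommand and invokes it with the remaining arguments.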
|
__init__.py
|
'''
xbrlDB is an interface to XBRL databases.
Two implementations are provided:
(1) the XBRL Public Database schema for Postgres, published by XBRL US.
(2) a graph database, based on the XBRL Abstract Model PWD 2.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
and does not apply to the XBRL US Database schema and description.
'''
import time, os, io, sys, logging
from arelle.Locale import format_string
from .XbrlPublicPostgresDB import insertIntoDB as insertIntoPostgresDB, isDBPort as isPostgresPort
from .XbrlSemanticSqlDB import insertIntoDB as insertIntoSemanticSqlDB, isDBPort as isSemanticSqlPort
from .XbrlSemanticGraphDB import insertIntoDB as insertIntoRexsterDB, isDBPort as isRexsterPort
from .XbrlSemanticRdfDB import insertIntoDB as insertIntoRdfDB, isDBPort as isRdfPort
from .XbrlSemanticJsonDB import insertIntoDB as insertIntoJsonDB, isDBPort as isJsonPort
from .XbrlDpmSqlDB import insertIntoDB as insertIntoDpmDB, isDBPort as isDpmPort
dbTypes = {
"postgres": insertIntoPostgresDB,
"mssqlSemantic": insertIntoSemanticSqlDB,
"mysqlSemantic": insertIntoSemanticSqlDB,
"orclSemantic": insertIntoSemanticSqlDB,
"pgSemantic": insertIntoSemanticSqlDB,
"sqliteSemantic": insertIntoSemanticSqlDB,
"sqliteDpmDB": insertIntoDpmDB,
"rexster": insertIntoRexsterDB,
"rdfDB": insertIntoRdfDB,
"json": insertIntoJsonDB
}
dbProduct = {
"postgres": "postgres",
"mssqlSemantic": "mssql",
"mysqlSemantic": "mysql",
"orclSemantic": "orcl",
"pgSemantic": "postgres",
"sqliteSemantic": "sqlite",
"sqliteDpmDB": "sqlite",
"rexster": None,
"rdfDB": None,
"json": None
}
_loadFromDBoptions = None # only set for load, vs store operation
_storeIntoDBoptions = None
_schemaRefSubstitutions = None # for DPM database
def xbrlDBmenuEntender(cntlr, menu, *args, **kwargs):
def storeIntoDBMenuCommand():
# save DTS menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No XBRL instance or taxonomy is loaded.")
return
from arelle.DialogUserPassword import askDatabase
# (user, password, host, port, database)
priorDBconnection = cntlr.config.get("xbrlDBconnection", None)
dbConnection = askDatabase(cntlr.parent, priorDBconnection)
if not dbConnection: # action cancelled
return
def backgroundStoreIntoDB():
try:
host, port, user, password, db, timeout, dbType = dbConnection
product = None
if timeout and timeout.isdigit():
timeout = int(timeout)
# identify server
if dbType in dbTypes:
insertIntoDB = dbTypes[dbType]
product = dbProduct[dbType]
else:
cntlr.addToLog(_("Probing host {0} port {1} to determine server database type.")
.format(host, port))
if isPostgresPort(host, port):
dbType = "postgres"
insertIntoDB = insertIntoPostgresDB
elif isSemanticSqlPort(host, port):
dbType = "pgSemantic"
insertIntoDB = insertIntoPostgresDB
elif isRexsterPort(host, port):
dbType = "rexster"
insertIntoDB = insertIntoRexsterDB
elif isRdfPort(host, port, db):
dbType = "rdfDB"
insertIntoDB = insertIntoRdfDB
elif isJsonPort(host, port, db):
dbType = "json"
insertIntoDB = insertIntoJsonDB
else:
cntlr.addToLog(_("Unable to determine server type!\n ") +
_("Probing host {0} port {1} unable to determine server type.")
.format(host, port))
cntlr.config["xbrlDBconnection"] = (host, port, user, password, db, timeout, '') # forget type
cntlr.saveConfig()
return
cntlr.addToLog(_("Database type {} identified.").format(dbType))
cntlr.config["xbrlDBconnection"] = (host, port, user, password, db, timeout, dbType)
cntlr.saveConfig()
startedAt = time.time()
insertIntoDB(cntlr.modelManager.modelXbrl,
host=host, port=port, user=user, password=password, database=db, timeout=timeout,
product=product)
cntlr.addToLog(format_string(cntlr.modelManager.locale,
_("stored to database in %.2f secs"),
time.time() - startedAt))
except Exception as ex:
import traceback
cntlr.addToLog(
_("[xpDB:exception] Loading XBRL DB: %(exception)s: %(error)s \n%(traceback)s") %
{"exception": ex.__class__.__name__,
"error": str(ex),
"exc_info": True,
"traceback": traceback.format_tb(sys.exc_info()[2])})
cntlr.config["xbrlDBconnection"] = (host, port, user, password, db, timeout, '') # forget type
cntlr.saveConfig()
import threading
thread = threading.Thread(target=backgroundStoreIntoDB)
thread.daemon = True
thread.start()
# Extend menu with an item for the savedts plugin
menu.add_command(label="Store to XBRL DB",
underline=0,
command=storeIntoDBMenuCommand)
# add log handler
logging.getLogger("arelle").addHandler(LogToDbHandler())
def storeIntoDB(dbConnection, modelXbrl, rssItem=None, **kwargs):
host = port = user = password = db = timeout = dbType = None
if isinstance(dbConnection, (list, tuple)): # variable length list
if len(dbConnection) > 0: host = dbConnection[0]
if len(dbConnection) > 1: port = dbConnection[1]
if len(dbConnection) > 2: user = dbConnection[2]
if len(dbConnection) > 3: password = dbConnection[3]
if len(dbConnection) > 4: db = dbConnection[4]
if len(dbConnection) > 5 and dbConnection[5] and dbConnection[5].isdigit():
timeout = int(dbConnection[5])
if len(dbConnection) > 6: dbType = dbConnection[6]
startedAt = time.time()
product = None
if dbType in dbTypes:
insertIntoDB = dbTypes[dbType]
product = dbProduct[dbType]
elif isPostgresPort(host, port):
insertIntoDB = insertIntoPostgresDB
elif isSemanticSqlPort(host, port):
insertIntoDB = insertIntoSemanticSqlDB
elif isRexsterPort(host, port):
insertIntoDB = insertIntoRexsterDB
elif isRdfPort(host, port, db):
insertIntoDB = insertIntoRdfDB
elif isJsonPort(host, port, db):
insertIntoDB = insertIntoJsonDB
else:
        modelXbrl.modelManager.addToLog('Server at "{0}:{1}" is not recognized to be either a Postgres or a Rexster service.'.format(host, port))
return
result = insertIntoDB(modelXbrl, host=host, port=port, user=user, password=password, database=db, timeout=timeout, product=product, rssItem=rssItem, **kwargs)
if kwargs.get("logStoredMsg", result): # if false/None result and no logStoredMsg parameter then skip the message
modelXbrl.modelManager.addToLog(format_string(modelXbrl.modelManager.locale,
_("stored to database in %.2f secs"),
time.time() - startedAt), messageCode="info", file=modelXbrl.uri)
return result
def xbrlDBcommandLineOptionExtender(parser, *args, **kwargs):
# extend command line options to store to database
parser.add_option("--store-to-XBRL-DB",
action="store",
dest="storeIntoXbrlDb",
help=_("Store into XBRL DB. "
"Provides connection string: host,port,user,password,database[,timeout[,{postgres|rexster|rdfDB}]]. "
"Autodetects database type unless 7th parameter is provided. "))
parser.add_option("--load-from-XBRL-DB",
action="store",
dest="loadFromXbrlDb",
help=_("Load from XBRL DB. "
"Provides connection string: host,port,user,password,database[,timeout[,{postgres|rexster|rdfDB}]]. "
"Specifies DB parameters to load and optional file to save XBRL into. "))
logging.getLogger("arelle").addHandler(LogToDbHandler())
def xbrlDBCommandLineXbrlLoaded(cntlr, options, modelXbrl, *args, **kwargs):
from arelle.ModelDocument import Type
if modelXbrl.modelDocument.type == Type.RSSFEED and getattr(options, "storeIntoXbrlDb", False):
modelXbrl.xbrlDBconnection = options.storeIntoXbrlDb.split(",")
# for semantic SQL database check for loaded filings
if (len(modelXbrl.xbrlDBconnection) > 7 and
modelXbrl.xbrlDBconnection[6] in ("mssqlSemantic","mysqlSemantic","orclSemantic",
"pgSemantic","sqliteSemantic") and
modelXbrl.xbrlDBconnection[7] == "skipLoadedFilings"):
# specify reloading of cached source documents (may have been corrupted originally or refiled)
modelXbrl.reloadCache = True
storeIntoDB(modelXbrl.xbrlDBconnection, modelXbrl, rssObject=modelXbrl.modelDocument)
def xbrlDBCommandLineXbrlRun(cntlr, options, modelXbrl, *args, **kwargs):
from arelle.ModelDocument import Type
if (modelXbrl.modelDocument.type not in (Type.RSSFEED, Type.TESTCASE, Type.REGISTRYTESTCASE) and
getattr(options, "storeIntoXbrlDb", False) and
not getattr(modelXbrl, "xbrlDBprocessedByStreaming", False)):
dbConnection = options.storeIntoXbrlDb.split(",")
storeIntoDB(dbConnection, modelXbrl)
def xbrlDBvalidateRssItem(val, modelXbrl, rssItem, *args, **kwargs):
if hasattr(val.modelXbrl, 'xbrlDBconnection'):
storeIntoDB(val.modelXbrl.xbrlDBconnection, modelXbrl, rssItem)
def xbrlDBtestcaseVariationXbrlLoaded(val, modelXbrl, *args, **kwargs):
if _storeIntoDBoptions:
return storeIntoDB(_storeIntoDBoptions.split(','), modelXbrl)
def xbrlDBdialogRssWatchDBconnection(*args, **kwargs):
try:
from .DialogRssWatchExtender import dialogRssWatchDBextender
dialogRssWatchDBextender(*args, **kwargs)
except ImportError:
pass
def xbrlDBdialogRssWatchValidateChoices(dialog, frame, row, *args, **kwargs):
from arelle.UiUtil import checkbox
dialog.checkboxes += (
checkbox(frame, 2, row,
"Store into XBRL Database",
"storeIntoXbrlDb"),
)
def xbrlDBrssWatchHasWatchAction(rssWatchOptions, *args, **kwargs):
return rssWatchOptions.get("xbrlDBconnection") and rssWatchOptions.get("storeIntoXbrlDB")
def xbrlDBrssDoWatchAction(modelXbrl, rssWatchOptions, rssItem, *args, **kwargs):
dbConnectionString = rssWatchOptions.get("xbrlDBconnection")
if dbConnectionString:
dbConnection = dbConnectionString.split(',')
storeIntoDB(dbConnection, modelXbrl)
def xbrlDBLoaderSetup(cntlr, options, *args, **kwargs):
global _loadFromDBoptions, _storeIntoDBoptions, _schemaRefSubstitutions
# set options to load from DB (instead of load from XBRL and store in DB)
_loadFromDBoptions = getattr(options, "loadFromXbrlDb", None)
_storeIntoDBoptions = getattr(options, "storeIntoXbrlDb", None)
_schemaRefSubstitutions = None
if _storeIntoDBoptions:
dbConnection = _storeIntoDBoptions.split(',')
if len(dbConnection) > 7 and dbConnection[6] == "sqliteDpmDB":
for extraArg in dbConnection[7:]:
argName, _sep, argValue = extraArg.partition("=")
if argName == "schemaRefSubstitutions":
_schemaRefSubstitutions = dict(_keyVal.split(":")[0:2] for _keyVal in argValue.split(";"))
def xbrlDBLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    # check if big instance and has header with an initial incomplete tree walk (just 2 elements)
if not _loadFromDBoptions:
return None
# load from DB and save XBRL in filepath, returning modelDocument
extraArgs = {"loadDBsaveToFile": filepath, "logStoredMsg": False}
dbConnection = _loadFromDBoptions.split(',')
if len(dbConnection) > 7:
for extraArg in dbConnection[7:]:
argName, _sep, argValue = extraArg.partition("=")
extraArgs[argName] = argValue
return storeIntoDB(dbConnection, modelXbrl, **extraArgs)
def xbrlDBmodelXbrlInit(modelXbrl, *args, **kwargs):
modelXbrl.xbrlDBprocessedByStreaming = False
def xbrlDBstartStreaming(modelXbrl, *args, **kwargs):
if _storeIntoDBoptions:
return storeIntoDB(_storeIntoDBoptions.split(','), modelXbrl, streamingState="start", logStoredMsg=False)
def xbrlDBstreamingFacts(modelXbrl, modelFacts, *args, **kwargs):
if _storeIntoDBoptions:
return storeIntoDB(_storeIntoDBoptions.split(','), modelXbrl, streamingState="acceptFacts", streamedFacts=modelFacts, logStoredMsg=False)
def xbrlDBfinishStreaming(modelXbrl, *args, **kwargs):
if _storeIntoDBoptions:
modelXbrl.xbrlDBprocessedByStreaming = True
return storeIntoDB(_storeIntoDBoptions.split(','), modelXbrl, streamingState="finish", logStoredMsg=False)
def modelDocumentInstanceSchemaRefRewriter(modelDocument, url, *args, **kwargs):
if _schemaRefSubstitutions:
for _from, _to in _schemaRefSubstitutions.items():
url = url.replace(_from, _to) # for DPM db substitutions
return url
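# LogToDbHandler (below) buffers log records in memory so dbHandlerLogEntries() can later
# return them (optionally clearing the buffer) for storage in the database.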
class LogToDbHandler(logging.Handler):
def __init__(self):
super(LogToDbHandler, self).__init__()
self.logRecordBuffer = []
def flush(self):
del self.logRecordBuffer[:]
def dbHandlerLogEntries(self, clear=True):
entries = []
for logRec in self.logRecordBuffer:
message = { "text": self.format(logRec) }
if logRec.args:
for n, v in logRec.args.items():
message[n] = v
entry = {"code": logRec.messageCode,
"level": logRec.levelname.lower(),
"refs": logRec.refs,
"message": message}
entries.append(entry)
if clear:
del self.logRecordBuffer[:]
return entries
def emit(self, logRecord):
self.logRecordBuffer.append(logRecord)
__pluginInfo__ = {
'name': 'XBRL Database',
'version': '0.9',
'description': "This plug-in implements the XBRL Public Postgres, Abstract Model and DPM Databases. ",
'license': 'Apache-2 (Arelle plug-in), BSD license (pg8000 library)',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2013 Mark V Systems Limited, All rights reserved,\n'
'uses: cx_Oracle Copyright (c) 2007-2012, Anthony Tuininga. All rights reserved (Oracle DB), \n'
' (and)Copyright (c) 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta, Canada. All rights reserved, \n'
' pg8000, Copyright (c) 2007-2009, Mathieu Fenniak (Postgres DB), \n'
' pyodbc, no copyright, Michael Kleehammer (MS SQL), \n'
' PyMySQL, Copyright (c) 2010, 2013 PyMySQL contributors (MySQL DB), and\n'
' rdflib, Copyright (c) 2002-2012, RDFLib Team (RDF DB)',
# classes of mount points (required)
'CntlrWinMain.Menu.Tools': xbrlDBmenuEntender,
'CntlrCmdLine.Options': xbrlDBcommandLineOptionExtender,
'CntlrCmdLine.Utility.Run': xbrlDBLoaderSetup,
'CntlrCmdLine.Xbrl.Loaded': xbrlDBCommandLineXbrlLoaded,
'CntlrCmdLine.Xbrl.Run': xbrlDBCommandLineXbrlRun,
'DialogRssWatch.FileChoices': xbrlDBdialogRssWatchDBconnection,
'DialogRssWatch.ValidateChoices': xbrlDBdialogRssWatchValidateChoices,
'ModelDocument.PullLoader': xbrlDBLoader,
'RssWatch.HasWatchAction': xbrlDBrssWatchHasWatchAction,
'RssWatch.DoWatchAction': xbrlDBrssDoWatchAction,
'ModelXbrl.Init': xbrlDBmodelXbrlInit,
'Streaming.Start': xbrlDBstartStreaming,
'Streaming.Facts': xbrlDBstreamingFacts,
'Streaming.Finish': xbrlDBfinishStreaming,
'Validate.RssItem': xbrlDBvalidateRssItem,
'TestcaseVariation.Xbrl.Loaded': xbrlDBtestcaseVariationXbrlLoaded,
'ModelDocument.InstanceSchemaRefRewriter': modelDocumentInstanceSchemaRefRewriter
}
|
3.thread_getting_thinfo.py
|
import threading
import time
def worker():
print(threading.currentThread().getName(), "Starting")
time.sleep(2)
print(threading.currentThread().getName(), "Ending")
return
t = threading.Thread(target=worker)
t.start()
print("testing")
|
test_start_manager.py
|
import threading
import time as ttime
import json
from bluesky_queueserver.manager.start_manager import WatchdogProcess
from bluesky_queueserver.tests.common import format_jsonrpc_msg
import logging
class ReManagerEmulation(threading.Thread):
"""
    Emulation of RE Manager, which uses a Thread instead of a Process.
    The purpose of the emulator is to test whether the Watchdog can start
    and restart RE Manager properly. The emulator also generates periodic
    'heartbeat' messages to inform the Watchdog that it is running.
"""
def __init__(
self, *args, conn_watchdog, conn_worker, config=None, msg_queue=None, log_level=logging.DEBUG, **kwargs
):
super().__init__(*args, **kwargs)
self._conn_watchdog = conn_watchdog
self.n_loops = 0
self._exit = False
self._restart = False
self._send_heartbeat = True
self._lock = threading.Lock()
self._config_dict = config or {}
self._log_level = log_level
def _heartbeat(self):
hb_period, dt = 0.5, 0.01
n_wait = round(hb_period / dt)
msg = format_jsonrpc_msg("heartbeat", {"value": "alive"}, notification=True)
msg_json = json.dumps(msg)
while True:
# Since we are emulating 'kill' method, we want the function to
# react to 'exit' quickly.
for n in range(n_wait):
ttime.sleep(0.005)
if self._exit:
return
if self._send_heartbeat:
with self._lock:
self._conn_watchdog.send(msg_json)
def exit(self, *, restart=False):
"""
Stop the emulated RE Manager (exit the 'run' method). Set 'restart=True'
to skip informing Watchdog that the exit is intentional: Watchdog is expected
to restart the process.
"""
self._restart = restart
self._exit = True
def kill(self):
"""
        This is an emulation of the 'kill' method of mp.Process. The method just
        exits the current process normally.
"""
self.exit(restart=True)
def send_msg_to_watchdog(self, method, params=None, *, notification=False, timeout=0.5):
# The function may block all communication for the period of 'timeout', but
# this is acceptable for testing. Timeout would typically indicate an error.
msg = format_jsonrpc_msg(method, params, notification=notification)
with self._lock:
self._conn_watchdog.send(json.dumps(msg))
if notification:
return
if self._conn_watchdog.poll(timeout):
response_json = self._conn_watchdog.recv()
response = json.loads(response_json)
result = response["result"]
else:
result = None
return result
def stop_heartbeat(self):
"""
        The heartbeat generator may be stopped to emulate 'freezing' of the event loop of RE Manager.
"""
self._send_heartbeat = False
def run(self):
th_hb = threading.Thread(target=self._heartbeat)
th_hb.start()
while not self._exit:
ttime.sleep(0.01)
self.n_loops += 1
if not self._restart:
msg = format_jsonrpc_msg("manager_stopping", notification=True)
with self._lock:
self._conn_watchdog.send(json.dumps(msg))
th_hb.join()
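# ReWorkerEmulation (below) stands in for the RE Worker process: it only counts loops until
# exit()/kill() is called, which is enough to exercise the Watchdog's start/join/kill handling.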
class ReWorkerEmulation(threading.Thread):
def __init__(
self,
*args,
conn,
config=None,
msg_queue=None,
log_level=logging.DEBUG,
user_group_permissions=None,
**kwargs,
):
super().__init__(*args, **kwargs)
self._config_dict = config or {}
self._exit = False
self.n_loops = 0
self._log_level = log_level
def exit(self):
self._exit = True
def kill(self):
self.exit()
def run(self):
while not self._exit:
ttime.sleep(0.005)
self.n_loops += 1
def test_WatchdogProcess_1():
"""Test starting and orderly existing the RE Manager"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(1) # Let RE Manager run 1 second
assert wp._re_manager.n_loops > 0, "RE is not running"
wp._re_manager.exit(restart=False)
ttime.sleep(0.05)
assert wp._manager_is_stopping is True, "'Manager Stopping' flag is not set"
wp_th.join(0.1)
def test_WatchdogProcess_2():
"""Test starting RE Manager, stopping heartbeat generator
and waiting for restart of RE Manager"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(1) # Let RE Manager run 1 second
assert wp._re_manager.n_loops > 0, "RE is not running"
n_loops = wp._re_manager.n_loops
wp._re_manager.stop_heartbeat()
hb_timeout = wp._heartbeat_timeout
ttime.sleep(hb_timeout + 0.5)
# At this point RE Manager is expected to run for 0.5 second, so
# the new number of loops must be about 'n_loops/2'.
# Here we check if RE Manager was really restarted and the number of
# loops reset.
assert wp._re_manager.n_loops < n_loops, "Unexpected number of loops"
wp._re_manager.exit(restart=False)
ttime.sleep(0.05)
assert wp._manager_is_stopping is True, "'Manager Stopping' flag is not set"
wp_th.join(0.1)
def test_WatchdogProcess_3():
"""Test starting RE Manager, exiting without sending notification and
and waiting for the restart of RE Manager"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(1) # Let RE Manager run 1 second
assert wp._re_manager.n_loops > 0, "RE is not running"
n_loops = wp._re_manager.n_loops
# Stop RE Manager without notifying the Watchdog (emulates crashing of RE Manager)
wp._re_manager.exit(restart=True)
hb_timeout = wp._heartbeat_timeout
ttime.sleep(hb_timeout + 0.5)
# At this point RE Manager is expected to run for 0.5 second, so
# the new number of loops must be about 'n_loops/2'.
# Here we check if RE Manager was really restarted and the number of
# loops reset.
assert wp._re_manager.n_loops < n_loops, "Unexpected number of loops"
wp._re_manager.exit(restart=False)
ttime.sleep(0.05)
assert wp._manager_is_stopping is True, "'Manager Stopping' flag is not set"
wp_th.join(0.1)
def test_WatchdogProcess_4():
"""
Test if Watchdog correctly executing commands that control starting
and stopping RE Worker.
"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation, cls_run_engine_worker=ReWorkerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker", params={"user_group_permissions": {}})
assert response["success"] is True, f"Unexpected response from RE Manager: {response}"
# Worker is expected to be alive
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is True, f"Unexpected response from RE Manager: {response}"
# Join running process (thread). Expected to timeout.
    # Note: the timeout here should be smaller than the message timeout
    # in the 'send_msg_to_watchdog' method.
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.1})
assert response["success"] is False, f"Unexpected response from RE Manager: {response}"
# Worker is expected to be alive
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is True, "Unexpected response from RE Manager"
# Exit the process (thread).
wp._re_worker.exit()
ttime.sleep(0.01)
# Worker is expected to be stopped
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is False, "Unexpected response from RE Manager"
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
def test_WatchdogProcess_5():
"""
Test 'kill_re_worker' command RE Worker.
"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation, cls_run_engine_worker=ReWorkerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker", params={"user_group_permissions": {}})
assert response["success"] is True, "Unexpected response from RE Manager"
# Worker is expected to be alive
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is True, "Unexpected response from RE Manager"
# Kill RE Worker process (emulated, since RE Worker is a thread)
response = wp._re_manager.send_msg_to_watchdog("kill_re_worker")
assert response["success"] is True, "Unexpected response from RE Manager"
# Worker is expected to be stopped
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is False, "Unexpected response from RE Manager"
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
def test_WatchdogProcess_6():
"""
Test if RE configuration is passed to RE Worker
"""
config_worker = {"some_parameter1": "some_value1"}
config_manager = {"some_parameter2": "some_value2"}
wp = WatchdogProcess(
config_worker=config_worker,
config_manager=config_manager,
cls_run_engine_manager=ReManagerEmulation,
cls_run_engine_worker=ReWorkerEmulation,
)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker", params={"user_group_permissions": {}})
assert response["success"] is True, "Unexpected response from RE Manager"
# Check if configuration was set correctly in RE Worker and RE manager
assert wp._re_worker._config_dict == config_worker, "Worker configuration was not passed correctly"
assert wp._re_manager._config_dict == config_manager, "Manager configuration was not passed correctly"
# Exit the process (thread).
wp._re_worker.exit()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
def test_WatchdogProcess_7():
"""
Test if the Watchdog and Manager processes are initialized with correct logger
"""
config_worker = {"some_parameter1": "some_value1"}
config_manager = {"some_parameter2": "some_value2"}
log_level = logging.INFO
wp = WatchdogProcess(
config_worker=config_worker,
config_manager=config_manager,
cls_run_engine_manager=ReManagerEmulation,
cls_run_engine_worker=ReWorkerEmulation,
log_level=log_level,
)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker", params={"user_group_permissions": {}})
assert response["success"] is True, "Unexpected response from RE Manager"
# Check if configuration was set correctly in RE Worker and RE manager
assert wp._re_worker._log_level == log_level
assert wp._re_manager._log_level == log_level
# Exit the process (thread).
wp._re_worker.exit()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:  # image has no EXIF data or orientation tag
        pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset attributes will change during training, otherwise InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
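# Minimal usage sketch for create_dataloader (defined above); it is not executed at
# import. The 'data/images' path and the bare `opt` namespace are illustrative
# assumptions -- any object exposing `single_cls` works for the `opt` argument.
def _example_create_dataloader():
    from types import SimpleNamespace
    opt = SimpleNamespace(single_cls=False)  # assumed minimal options object
    dataloader, dataset = create_dataloader('data/images', imgsz=640, batch_size=16,
                                            stride=32, opt=opt, rank=-1, workers=4)
    for imgs, targets, paths, shapes in dataloader:
        print(imgs.shape, targets.shape)  # e.g. torch.Size([16, 3, 640, 640])
        break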
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
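# Sketch of the worker-reuse behaviour described in the docstrings above (illustrative
# only, never called here; `dataset` is assumed to be any map-style Dataset).
# InfiniteDataLoader spawns its worker processes once and keeps drawing batches from
# them across epochs, whereas a vanilla DataLoader re-creates workers every epoch.
def _example_infinite_dataloader(dataset):
    loader = InfiniteDataLoader(dataset, batch_size=8, num_workers=2)
    for epoch in range(3):
        for batch in loader:  # the same worker processes serve every epoch
            pass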
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640):
self.img_size = img_size
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
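# Worked example for img2label_paths: the mapping is purely textual, so no files
# need to exist for it to hold.
def _example_img2label_paths():
    paths = img2label_paths([os.path.join('coco', 'images', 'train', 'im0.jpg')])
    # -> [os.path.join('coco', 'labels', 'train', 'im0.txt')]
    return paths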
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
if cache_path.is_file():
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
cache = self.cache_labels(cache_path, prefix) # re-cache
else:
cache = self.cache_labels(cache_path, prefix) # cache
# Display cache
[nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + desc, total=n, initial=n)
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9 = []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
labels9.append(labels)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
if len(labels9):
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
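# Minimal letterbox usage sketch on a synthetic frame (no file I/O; not called here).
def _example_letterbox():
    img0 = np.zeros((720, 1280, 3), dtype=np.uint8)  # synthetic 720x1280 frame
    img, ratio, (dw, dh) = letterbox(img0, new_shape=640)  # auto=True pads to a 32-multiple rect
    # img.shape -> (384, 640, 3): the long side is scaled to 640 and the short side is
    # padded from 360 up to the next multiple of 32, split evenly top and bottom.
    return img.shape, ratio, (dw, dh)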
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
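# Worked example for box_candidates: the second box collapses to ~1 px after
# augmentation, so it fails the width/height and area-ratio thresholds.
def _example_box_candidates():
    box1 = np.array([[0., 0.], [0., 0.], [100., 50.], [100., 50.]])  # 2 boxes before augment, shape (4, n)
    box2 = np.array([[0., 0.], [0., 0.], [90., 1.], [90., 1.]])      # the same boxes after augment
    return box_candidates(box1, box2)  # -> array([ True, False])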
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
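# Usage sketch for autosplit (not executed at import): it writes autosplit_train.txt,
# autosplit_val.txt and autosplit_test.txt next to the images. The '../coco128/images'
# path below is an assumption about the local dataset layout.
def _example_autosplit():
    autosplit(path='../coco128/images', weights=(0.8, 0.1, 0.1))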
|
ethereum_storage.py
|
import time
from threading import Thread, Lock, Event
from typing import Optional, Union, Tuple, Generator, Dict, Set
from flask import current_app
from web3.exceptions import BadFunctionCallOutput
from app import socketio
from app.utils.settings import Settings
from app.models import KeyLookupTable
from app.utils.ethereum_utils import EthereumUtils
from app.utils.misc import Address
class EthereumStorage:
def __init__(self, account: Address, password: str):
"""
Load the ethereum storage for `account`. `password` should also be provided.
**Assumptions**:
1. Singleton class EthereumUtils has been initialized before
        2. Contracts have been initialized (EthereumUtils.init_contracts)
3. Storage contract for `account` has been created (EthereumUtils.new_storage)
"""
self.__cache_dict: Dict[str, Tuple[str, bool]] = {}
self.__change_set: Set[str] = set()
self.__delete_set: Set[str] = set()
self.__ethereum_utils = EthereumUtils()
self.__account = account
self.__password = password
try:
self.__storage = self.__ethereum_utils.get_storage(account)
        except TypeError:
            # Re-raise the original exception so its traceback and message are preserved
            raise
self.__lock = Lock()
self.__terminating = False
self.__store_interval = 15
self.__load_interval = 5
self.__store_event = Event()
self.__blockchain_length = Settings().blockchain_length
self.__blockchain = Settings().blockchain
# load blockchain from disk
for k, v in self.__blockchain:
if v == '':
del self.__cache_dict[k]
else:
self.__cache_dict[k] = (v, True)
# make up for the missing entries (delete entries that have not sync'ed)
for k in self.__cache_dict:
if not k.startswith('__') and KeyLookupTable.query.get(k) is None:
new_entry = KeyLookupTable(key=k, meta_data='', hidden=False)
KeyLookupTable.query.session.add(new_entry)
KeyLookupTable.query.session.commit()
self.__load_thread = Thread(target=self.load_worker, daemon=True)
self.__store_thread = Thread(target=self.store_worker, daemon=True)
self.__load_thread.start()
self.__store_thread.start()
self.__loaded = False
self.__app = current_app._get_current_object()
def add(self, k: str, v: str):
"""
Add a new entry with key `k` and value `v` into the database. If the entry with key `k` exists,
        update its value with `v`. **This will not immediately write to the underlying database.**
"""
with self.__lock:
if k in self.__cache_dict and self.__cache_dict[k][1]:
self.__change_set.add(k)
elif k in self.__delete_set:
self.__delete_set.remove(k)
self.__change_set.add(k)
self.__cache_dict[k] = (v, False)
socketio.emit('persistence change', k)
def delete(self, k: str):
"""
        Delete the entry with key `k` from the database. If the key does not exist, a `KeyError` is raised.
        **This will not immediately write to the underlying database.**
"""
with self.__lock:
if k in self.__cache_dict:
if not self.__cache_dict[k][1]:
if k in self.__change_set:
# changed in cache
self.__delete_set.add(k)
del self.__cache_dict[k]
self.__change_set.remove(k)
else:
# new in cache, not changed in cache
del self.__cache_dict[k]
else:
self.__delete_set.add(k)
del self.__cache_dict[k]
else:
raise KeyError(k)
def get(self, k: str, check_persistence: bool = False) -> Union[Optional[str], Tuple[Optional[str], bool]]:
"""
Get an existing entry with key `k` in the database. If the entry with key `k` exists, return its value.
If the key does not exist, return `None`. If `check_persistence` is `True`,
returns a tuple like `(value, True)`, where the second element shows whether this key-value pair has been
`store`d into the underlying database. If `check_persistence` is `True` and the key does not exist, return
`(None, None)`.
"""
result = self.__cache_dict.get(k, (None, None))
if check_persistence:
return result
else:
return result[0]
def get_all(self) -> dict:
"""
Return all keys with their values and persistence in the database. The returned value should have a structure
like this:
{
key: (value, persistence)
}
"""
return self.__cache_dict
def __get_all_add(self) -> Generator[Tuple[str, str], None, None]:
return ((k, v[0]) for k, v in self.__cache_dict.items() if not v[1])
def __get_all_del(self) -> Generator[str, None, None]:
return (k for k in self.__delete_set)
def store(self):
"""
        Synchronize the cached changes with the underlying database.
"""
self.__ethereum_utils.unlock_account(self.__account, self.__password, duration=60)
# FIXME: use async add
# TODO: how to determine if a key is really stored? only update persistence if transaction mined?
add_list = []
with self.__lock:
for k, v in self.__get_all_add():
print('adding:', k, v)
add_list.append((k, v, self.__ethereum_utils.add_async(self.__account, k, v)))
for k in self.__get_all_del():
print('deleting:', k)
add_list.append((None, None, self.__ethereum_utils.add_async(self.__account, k)))
self.__change_set = set()
self.__delete_set = set()
return add_list
def estimate_cost(self, args: dict) -> int:
"""
Estimates the cost of the storage operation with arguments `args`.
"""
# FIXME: it returns gas count (gas count * gas price = cost in wei)
key = args['key']
value = args['value']
return self.__ethereum_utils.estimate_add_cost(self.__account, key, value)
def calculate_total_cost(self) -> int:
"""
Calculates the cost of currently cached storage operations.
"""
# FIXME: it returns gas count (gas count * gas price = cost in wei)
s = 0
for k, v in self.__get_all_add():
s += self.__ethereum_utils.estimate_add_cost(self.__account, k, v)
for k in self.__get_all_del():
s += self.__ethereum_utils.estimate_add_cost(self.__account, k)
return s
def balance(self) -> int:
"""
Returns the balance (remaining storage space) of current user.
"""
# FIXME: it returns wei
return self.__ethereum_utils.get_balance(self.__account)
def get_constructor_arguments(self) -> Address:
"""
Returns the arguments list to pass to the constructor.
"""
# TODO: is it necessary to return password?
return self.__account
def size(self) -> int:
return len(self.__cache_dict)
def __setitem__(self, key, value):
self.add(key, value)
def __delitem__(self, key):
self.delete(key)
def __getitem__(self, item):
        return self.get(item)
def __len__(self):
return self.size()
def store_worker(self):
while True:
try:
self.__store_event.set()
add_list = self.store()
if self.__terminating:
# Terminating, hopefully someone will mine our transaction :)
return
finished = set()
while len(finished) < len(add_list):
for k, v, h in add_list:
if h not in finished and self.__ethereum_utils.get_transaction_receipt(h):
finished.add(h)
if k:
with self.__lock:
if self.__cache_dict.get(k)[0] == v:
self.__cache_dict[k] = (v, True)
socketio.emit('persistence change', k)
time.sleep(0.01)
self.__store_event.clear()
time.sleep(self.__store_interval)
except Exception as e:
print(e)
time.sleep(self.__store_interval)
def load_key_value(self, k: str, v: str):
self.__blockchain.append((k, v))
if v == '':
del self.__cache_dict[k]
if not k.startswith('__'):
KeyLookupTable.query.filter_by(key=k).delete()
else:
self.__cache_dict[k] = (v, True)
if not k.startswith('__'):
old_entry = KeyLookupTable.query.get(k)
if old_entry:
old_entry.meta_data = ''
else:
new_entry = KeyLookupTable(key=k, meta_data='', hidden=False)
KeyLookupTable.query.session.add(new_entry)
def load_worker(self):
while True:
try:
if self.__terminating:
return
new_length = self.__ethereum_utils.get_length(self.__account)
print('load', self.__blockchain_length, new_length)
if new_length > self.__blockchain_length:
self.__store_event.wait()
with self.__lock:
with self.__app.app_context():
for k, v in self.__ethereum_utils.get_history(self.__account, self.__blockchain_length,
self.__storage):
print('loading:', k, v)
self.load_key_value(k, v)
self.__blockchain_length = new_length
KeyLookupTable.query.session.commit()
Settings().blockchain_length = new_length
Settings().blockchain = self.__blockchain
Settings().write()
socketio.emit('refresh password')
except BadFunctionCallOutput:
break
except Exception as e:
print(e)
finally:
self.__loaded = True
time.sleep(self.__load_interval)
def terminate(self):
self.__terminating = True
self.__load_thread.join()
self.__store_thread.join()
def __del__(self):
self.terminate()
@property
def loaded(self):
return self.__loaded
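# Minimal usage sketch for EthereumStorage (not executed at import). It assumes the
# preconditions from the class docstring hold: an Ethereum node is reachable,
# EthereumUtils() has been initialised, contracts are deployed and a storage contract
# exists for `account`; `account` and `password` are caller-supplied.
def _example_ethereum_storage(account, password):
    storage = EthereumStorage(account, password)
    storage['greeting'] = 'hello'  # queued locally, persisted later by store_worker
    value, persisted = storage.get('greeting', check_persistence=True)
    print(value, persisted)        # 'hello', False until the transaction is mined
    storage.terminate()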
|
eventEngine.py
|
# encoding: UTF-8
# System modules
from queue import Queue, Empty
from threading import Thread, Timer
from time import sleep
from collections import defaultdict
# Third-party modules
#from qtpy.QtCore import QTimer
#import threading
#import time
# Modules developed in-house
#from .eventType import *
from vnpy.trader.event.eventType import EVENT_TIMER
import pyuv
import os
########################################################################
class EventEngine(object):
"""
    Event-driven engine.
    All variables of the engine are private, to prevent them from being
    modified accidentally from outside, which could cause bugs.
    Variables:
    __queue: private, the event queue
    __active: private, on/off switch of the engine
    __thread: private, the event-processing thread
    __timer: private, the timer
    __handlers: private, dict of registered event handlers
    Methods:
    __run: private, main loop of the event-processing thread
    __process: private, processes an event by calling the handlers registered for it
    __onTimer: private, puts a timer event into the queue at a fixed interval
    start: public, start the engine
    stop: public, stop the engine
    register: public, register a handler with the engine
    unregister: public, unregister a handler from the engine
    put: public, put a new event into the event queue
    An event handler must take exactly one `event` argument, i.e.:
    function:
    def func(event)
        ...
    method:
    def method(self, event)
        ...
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = Timer(1, self.__onTimer)
#self.__timer.timeout.connect(self.__onTimer)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __del__(self):
self.stop()
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
                event = self.__queue.get(block = True, timeout = 1) # block for at most 1 second waiting for an event
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __onTimer(self):
"""向事件队列中存入计时器事件"""
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
#print('hello timer') #打印输出
self.__timer = Timer(1, self.__onTimer)
self.__timer.start()
#----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timer.start()
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timer.cancel()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
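# Minimal registration sketch for EventEngine (illustrative only, not executed when
# this module is imported): handlers take exactly one Event argument, as described
# in the class docstring.
def _example_event_engine():
    def on_timer(event):
        print('timer event:', event.type_)
    engine = EventEngine()
    engine.register(EVENT_TIMER, on_timer)  # called roughly once per second
    engine.start()
    sleep(3)
    engine.stop()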
########################################################################
class EventEngine2(object):
"""
    Event-driven engine whose timer runs in a Python thread.
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
self.__loop = pyuv.Loop.default_loop()
self.__loop.queue_work(self.__run)
# 事件处理线程
#self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = Thread(target = self.__runTimer)
self.__timerActive = False # 计时器工作状态
self.__timerSleep = 1 # 计时器触发间隔(默认1秒)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
                event = self.__queue.get(block = True, timeout = 1) # block for at most 1 second waiting for an event
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __runTimer(self):
"""运行在计时器线程中的循环函数"""
while self.__timerActive:
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
# 等待
sleep(self.__timerSleep)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器 n
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
#self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timerActive = True
self.__timer.start()
self.__loop.run()
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timerActive = False
self.__timer.join()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class Event:
"""事件对象"""
count=0
#----------------------------------------------------------------------
def __init__(self, type_=None):
"""Constructor"""
        self.type_ = type_ # event type
        self.dict_ = {} # dict holding the event payload
#----------------------------------------------------------------------
def test():
"""测试函数"""
import sys
from datetime import datetime
from PyQt4.QtCore import QCoreApplication
def simpletest(event):
        print(('Processing the timer event triggered every second: %s' % str(datetime.now())))
app = QCoreApplication(sys.argv)
ee = EventEngine2()
#ee.register(EVENT_TIMER, simpletest)
ee.registerGeneralHandler(simpletest)
ee.start()
app.exec_()
count = 0
#----------------------------------------------------------------------
def test2():
"""测试函数"""
import sys
from datetime import datetime
print('start')
def simpletest(event):
global count
count = count+1
print(count)
        print(('Processing the timer event triggered every second: %s' % str(datetime.now())))
if count>10:
count=0
#while 1:
# continue;
#print('quit')
    # show the relationship between the process and its threads
os.system("pstree -p " + str(os.getpid()))
ee = EventEngine2()
#ee.register(EVENT_TIMER, simpletest)
ee.registerGeneralHandler(simpletest)
ee.start()
# Running this script directly performs the test
if __name__ == '__main__':
test2()
|
controller.py
|
"""KEYBOARD CONTROLLER"""
from threading import Thread
from kivy.core.window import Window
from .rateLimitDecorator import RateLimited
class Controller:
def __init__(self, widget, key_press_handler, key_release_handler=None):
self.widget = widget
self.key_press_handler = key_press_handler
self.key_release_handler = key_release_handler
self.keys_being_pressed = dict()
self.kill_thread = False
self._thread = Thread(target=self._seeder)
self._thread.start()
self._bind_keyboard()
def _bind_keyboard(self):
self._keyboard = Window.request_keyboard(self._keyboard_closed, self.widget)
self._keyboard.bind(on_key_down=self._on_keyboard_down)
self._keyboard.bind(on_key_up=self._on_keyboard_up)
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, *i):
self.keys_being_pressed[keycode[0]] = keycode[1]
return True
def _on_keyboard_up(self, keyboard, keycode):
self.keys_being_pressed.pop(keycode[0])
self.key_release_handler(*keycode)
return True
def _seeder(self):
@RateLimited(61)
def _seed(self):
for key, value in dict(self.keys_being_pressed).items():
self.key_press_handler(key, value)
while not self.kill_thread:
_seed(self)
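# Usage sketch (assumes a running Kivy app that owns `widget`; the handler names are
# illustrative). Not executed at import.
def _example_controller(widget):
    def on_press(key, name):
        print('held:', key, name)      # called repeatedly (rate-limited) while a key is down
    def on_release(key, name):
        print('released:', key, name)  # called once when the key goes up
    return Controller(widget, on_press, key_release_handler=on_release)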
|
server.py
|
"""
Utilities for creating bokeh Server instances.
"""
import datetime as dt
import html
import inspect
import logging
import os
import pathlib
import signal
import sys
import traceback
import threading
import uuid
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial, wraps
from types import FunctionType, MethodType
from urllib.parse import urljoin, urlparse
import param
import bokeh
import bokeh.command.util
# Bokeh imports
from bokeh.application import Application as BkApplication
from bokeh.application.handlers.code import CodeHandler, _monkeypatch_io, patch_curdoc
from bokeh.application.handlers.function import FunctionHandler
from bokeh.command.util import build_single_handler_application
from bokeh.core.templates import AUTOLOAD_JS
from bokeh.document.events import ModelChangedEvent
from bokeh.embed.bundle import Script
from bokeh.embed.elements import html_page_for_render_items, script_for_render_items
from bokeh.embed.util import RenderItem
from bokeh.io import curdoc
from bokeh.server.server import Server
from bokeh.server.urls import per_app_patterns, toplevel_patterns
from bokeh.server.views.autoload_js_handler import AutoloadJsHandler as BkAutoloadJsHandler
from bokeh.server.views.doc_handler import DocHandler as BkDocHandler
from bokeh.server.views.root_handler import RootHandler as BkRootHandler
from bokeh.server.views.static_handler import StaticHandler
# Tornado imports
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, StaticFileHandler, authenticated
from tornado.wsgi import WSGIContainer
# Internal imports
from ..util import edit_readonly
from .logging import LOG_SESSION_CREATED, LOG_SESSION_DESTROYED, LOG_SESSION_LAUNCHING
from .profile import profile_ctx
from .reload import autoreload_watcher
from .resources import BASE_TEMPLATE, Resources, bundle_resources
from .state import state
logger = logging.getLogger(__name__)
#---------------------------------------------------------------------
# Private API
#---------------------------------------------------------------------
INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html")
def _origin_url(url):
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url, port):
if url.startswith("http"):
return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
else:
return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
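# Worked examples for the URL helpers above (pure string manipulation):
#   _origin_url('http://localhost:5006/app') -> 'localhost:5006/app'
#   _server_url('localhost', 5006)           -> 'http://localhost:5006/'
#   _server_url('http://0.0.0.0:80', 5006)   -> 'http://0.0.0.0:5006/'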
def _eval_panel(panel, server_id, title, location, doc):
from ..template import BaseTemplate
from ..pane import panel as as_panel
with set_curdoc(doc):
if isinstance(panel, (FunctionType, MethodType)):
panel = panel()
if isinstance(panel, BaseTemplate):
doc = panel._modify_doc(server_id, title, doc, location)
else:
doc = as_panel(panel)._modify_doc(server_id, title, doc, location)
return doc
def async_execute(func):
"""
Wrap async event loop scheduling to ensure that with_lock flag
is propagated from function to partial wrapping it.
"""
if not state.curdoc or not state.curdoc.session_context:
ioloop = IOLoop.current()
event_loop = ioloop.asyncio_loop
if event_loop.is_running():
ioloop.add_callback(func)
else:
event_loop.run_until_complete(func())
return
if isinstance(func, partial) and hasattr(func.func, 'lock'):
unlock = not func.func.lock
else:
unlock = not getattr(func, 'lock', False)
if unlock:
@wraps(func)
async def wrapper(*args, **kw):
return await func(*args, **kw)
wrapper.nolock = True
else:
wrapper = func
state.curdoc.add_next_tick_callback(wrapper)
param.parameterized.async_executor = async_execute
def _initialize_session_info(session_context):
from ..config import config
session_id = session_context.id
sessions = state.session_info['sessions']
history = -1 if config._admin else config.session_history
if not config._admin and (history == 0 or session_id in sessions):
return
state.session_info['total'] += 1
if history > 0 and len(sessions) >= history:
old_history = list(sessions.items())
sessions = OrderedDict(old_history[-(history-1):])
state.session_info['sessions'] = sessions
sessions[session_id] = {
'launched': dt.datetime.now().timestamp(),
'started': None,
'rendered': None,
'ended': None,
'user_agent': session_context.request.headers.get('User-Agent')
}
state.param.trigger('session_info')
state.on_session_created(_initialize_session_info)
#---------------------------------------------------------------------
# Bokeh patches
#---------------------------------------------------------------------
def server_html_page_for_session(session, resources, title, template=BASE_TEMPLATE,
template_variables=None):
render_item = RenderItem(
token = session.token,
roots = session.document.roots,
use_for_title = False,
)
if template_variables is None:
template_variables = {}
bundle = bundle_resources(resources)
return html_page_for_render_items(bundle, {}, [render_item], title,
template=template, template_variables=template_variables)
def autoload_js_script(resources, token, element_id, app_path, absolute_url):
resources = Resources.from_bokeh(resources)
bundle = bundle_resources(resources)
render_items = [RenderItem(token=token, elementid=element_id, use_for_title=False)]
bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))
return AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)
# Patch Application to handle session callbacks
class Application(BkApplication):
async def on_session_created(self, session_context):
for cb in state._on_session_created:
cb(session_context)
await super().on_session_created(session_context)
def initialize_document(self, doc):
super().initialize_document(doc)
if doc in state._templates:
template = state._templates[doc]
template.server_doc(title=template.title, location=True, doc=doc)
bokeh.command.util.Application = Application
class SessionPrefixHandler:
@contextmanager
def _session_prefix(self):
prefix = self.request.uri.replace(self.application_context._url, '')
if not prefix.endswith('/'):
prefix += '/'
base_url = urljoin('/', prefix)
rel_path = '/'.join(['..'] * self.application_context._url.strip('/').count('/'))
old_url, old_rel = state.base_url, state.rel_path
# Handle autoload.js absolute paths
abs_url = self.get_argument('bokeh-absolute-url', default=None)
if abs_url is not None:
app_path = self.get_argument('bokeh-app-path', default=None)
rel_path = abs_url.replace(app_path, '')
with edit_readonly(state):
state.base_url = base_url
state.rel_path = rel_path
try:
yield
finally:
with edit_readonly(state):
state.base_url = old_url
state.rel_path = old_rel
# Patch Bokeh DocHandler URL
class DocHandler(BkDocHandler, SessionPrefixHandler):
@authenticated
async def get(self, *args, **kwargs):
with self._session_prefix():
session = await self.get_session()
state.curdoc = session.document
logger.info(LOG_SESSION_CREATED, id(session.document))
try:
resources = Resources.from_bokeh(self.application.resources())
page = server_html_page_for_session(
session, resources=resources, title=session.document.title,
template=session.document.template,
template_variables=session.document.template_variables
)
finally:
state.curdoc = None
self.set_header("Content-Type", 'text/html')
self.write(page)
per_app_patterns[0] = (r'/?', DocHandler)
# Patch Bokeh Autoload handler
class AutoloadJsHandler(BkAutoloadJsHandler, SessionPrefixHandler):
''' Implements a custom Tornado handler for the autoload JS chunk
'''
async def get(self, *args, **kwargs):
element_id = self.get_argument("bokeh-autoload-element", default=None)
if not element_id:
self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
return
app_path = self.get_argument("bokeh-app-path", default="/")
absolute_url = self.get_argument("bokeh-absolute-url", default=None)
if absolute_url:
server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))
else:
server_url = None
with self._session_prefix():
session = await self.get_session()
state.curdoc = session.document
try:
resources = Resources.from_bokeh(self.application.resources(server_url))
js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url)
finally:
state.curdoc = None
self.set_header("Content-Type", 'application/javascript')
self.write(js)
per_app_patterns[3] = (r'/autoload.js', AutoloadJsHandler)
class RootHandler(BkRootHandler):
@authenticated
async def get(self, *args, **kwargs):
if self.index and not self.index.endswith('.html'):
prefix = "" if self.prefix is None else self.prefix
redirect_to = prefix + '.'.join(self.index.split('.')[:-1])
self.redirect(redirect_to)
await super().get(*args, **kwargs)
toplevel_patterns[0] = (r'/?', RootHandler)
bokeh.server.tornado.RootHandler = RootHandler
def modify_document(self, doc):
from bokeh.io.doc import set_curdoc as bk_set_curdoc
from ..config import config
logger.info(LOG_SESSION_LAUNCHING, id(doc))
if config.autoreload:
path = self._runner.path
argv = self._runner._argv
handler = type(self)(filename=path, argv=argv)
self._runner = handler._runner
module = self._runner.new_module()
# If no module was returned it means the code runner has some permanent
# unfixable problem, e.g. the configured source code has a syntax error
if module is None:
return
# One reason modules are stored is to prevent the module
# from being gc'd before the document is. A symptom of a
# gc'd module is that its globals become None. Additionally
# stored modules are used to provide correct paths to
# custom models resolver.
sys.modules[module.__name__] = module
doc.modules._modules.append(module)
old_doc = curdoc()
bk_set_curdoc(doc)
if config.autoreload:
set_curdoc(doc)
state.onload(autoreload_watcher)
sessions = []
try:
def post_check():
newdoc = curdoc()
# Do not let curdoc track modules when autoreload is enabled
# otherwise it will erroneously complain that there is
# a memory leak
if config.autoreload:
newdoc.modules._modules = []
# script is supposed to edit the doc not replace it
if newdoc is not doc:
raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
def handle_exception(handler, e):
from bokeh.application.handlers.handler import handle_exception
from ..pane import HTML
# Clean up
del sys.modules[module.__name__]
if hasattr(doc, 'modules'):
doc.modules._modules.remove(module)
else:
doc._modules.remove(module)
bokeh.application.handlers.code_runner.handle_exception = handle_exception
tb = html.escape(traceback.format_exc())
# Serve error
HTML(
f'<b>{type(e).__name__}</b>: {e}</br><pre style="overflow-y: scroll">{tb}</pre>',
css_classes=['alert', 'alert-danger'], sizing_mode='stretch_width'
).servable()
if config.autoreload:
bokeh.application.handlers.code_runner.handle_exception = handle_exception
state._launching.append(doc)
with _monkeypatch_io(self._loggers):
with patch_curdoc(doc):
with profile_ctx(config.profiler) as sessions:
self._runner.run(module, post_check)
def _log_session_destroyed(session_context):
logger.info(LOG_SESSION_DESTROYED, id(doc))
doc.on_session_destroyed(_log_session_destroyed)
finally:
state._launching.remove(doc)
if config.profiler:
try:
path = doc.session_context.request.path
state._profiles[(path, config.profiler)] += sessions
state.param.trigger('_profiles')
except Exception:
pass
bk_set_curdoc(old_doc)
CodeHandler.modify_document = modify_document
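# Hypothetical usage note: with the patched handler above, autoreload is driven by
# the `config.autoreload` flag read in `modify_document`, e.g. (assuming the panel
# CLI flag of the same name) something like:
#
#     panel serve app.py --autoreload
#
# which re-creates the code handler for every new session so edits to the served
# script are picked up without restarting the server.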
# Copied from bokeh 2.4.0, to fix directly in bokeh at some point.
def create_static_handler(prefix, key, app):
# patch
key = '/__patchedroot' if key == '/' else key
route = prefix
route += "/static/(.*)" if key == "/" else key + "/static/(.*)"
if app.static_path is not None:
return (route, StaticFileHandler, {"path" : app.static_path})
return (route, StaticHandler, {})
bokeh.server.tornado.create_static_handler = create_static_handler
#---------------------------------------------------------------------
# Public API
#---------------------------------------------------------------------
def init_doc(doc):
doc = doc or curdoc()
if not doc.session_context:
return doc
session_id = doc.session_context.id
sessions = state.session_info['sessions']
if session_id not in sessions:
return doc
sessions[session_id].update({
'started': dt.datetime.now().timestamp()
})
doc.on_event('document_ready', state._init_session)
return doc
@contextmanager
def set_curdoc(doc):
state.curdoc = doc
yield
state.curdoc = None
def with_lock(func):
"""
Wrap a callback function to execute with a lock allowing the
function to modify bokeh models directly.
Arguments
---------
func: callable
The callable to wrap
Returns
-------
wrapper: callable
Function wrapped to execute with the Document lock held.
"""
if inspect.iscoroutinefunction(func):
@wraps(func)
async def wrapper(*args, **kw):
return await func(*args, **kw)
else:
@wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.lock = True
return wrapper
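# A minimal usage sketch (the `slider` model and `update` callback are hypothetical,
# not part of this module): a callback decorated with `with_lock` is marked to run
# while holding the Document lock, so it may mutate Bokeh models directly.
#
#     @with_lock
#     def update():
#         slider.value += 1
#
#     # e.g. scheduled on the active document:
#     # state.curdoc.add_next_tick_callback(update)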
@contextmanager
def unlocked():
"""
Context manager which unlocks a Document and dispatches
ModelChangedEvents triggered in the context body to all sockets
on current sessions.
"""
curdoc = state.curdoc
if curdoc is None or curdoc.session_context is None or curdoc.session_context.session is None:
yield
return
connections = curdoc.session_context.session._subscribed_connections
hold = curdoc.callbacks.hold_value
if hold:
old_events = list(curdoc.callbacks._held_events)
else:
old_events = []
curdoc.hold()
try:
yield
events = []
for conn in connections:
socket = conn._socket
if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:
state._locks.add(socket)
locked = socket in state._locks
for event in curdoc.callbacks._held_events:
if (isinstance(event, ModelChangedEvent) and event not in old_events
and hasattr(socket, 'write_message') and not locked):
msg = conn.protocol.create('PATCH-DOC', [event])
WebSocketHandler.write_message(socket, msg.header_json)
WebSocketHandler.write_message(socket, msg.metadata_json)
WebSocketHandler.write_message(socket, msg.content_json)
for header, payload in msg._buffers:
WebSocketHandler.write_message(socket, header)
WebSocketHandler.write_message(socket, payload, binary=True)
elif event not in events:
events.append(event)
curdoc.callbacks._held_events = events
finally:
if not hold:
curdoc.unhold()
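# A minimal usage sketch (hypothetical `slider` model): changes made inside the
# `unlocked` block are dispatched directly to the websocket(s) of the current
# session instead of waiting for the Document lock to be acquired.
#
#     with unlocked():
#         slider.value = 42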
def serve(panels, port=0, address=None, websocket_origin=None, loop=None,
show=True, start=True, title=None, verbose=True, location=True,
threaded=False, **kwargs):
"""
Allows serving one or more panel objects on a single server.
The panels argument should be either a Panel object, a function
returning a Panel object, or a dictionary of either of these. If a
dictionary is supplied, the keys represent the slugs at which
each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`
will serve apps at /app and /app2 on the server.
Arguments
---------
panels: Viewable, function or {str: Viewable or function}
A Panel object, a function returning a Panel object or a
dictionary mapping from the URL slug to either.
port: int (optional, default=0)
Allows specifying a specific port
address : str
The address the server should listen on for HTTP requests.
websocket_origin: str or list(str) (optional)
A list of hosts that can connect to the websocket.
This is typically required when embedding a server app in
an external web site.
If None, "localhost" is used.
loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
The tornado IOLoop to run the Server on
show : boolean (optional, default=True)
Whether to open the server in a new browser tab on start
start : boolean (optional, default=True)
Whether to start the Server
title: str or {str: str} (optional, default=None)
An HTML title for the application or a dictionary mapping
from the URL slug to a customized title
verbose: boolean (optional, default=True)
Whether to print the address and port
location : boolean or panel.io.location.Location
Whether to create a Location component to observe and
set the URL location.
threaded: boolean (default=False)
Whether to start the server on a new Thread
kwargs: dict
Additional keyword arguments to pass to Server instance
"""
kwargs = dict(kwargs, **dict(
port=port, address=address, websocket_origin=websocket_origin,
loop=loop, show=show, start=start, title=title, verbose=verbose,
location=location
))
if threaded:
from tornado.ioloop import IOLoop
kwargs['loop'] = loop = IOLoop() if loop is None else loop
server = StoppableThread(
target=get_server, io_loop=loop, args=(panels,), kwargs=kwargs
)
server_id = kwargs.get('server_id', uuid.uuid4().hex)
state._threads[server_id] = server
server.start()
else:
server = get_server(panels, **kwargs)
return server
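# A minimal usage sketch of `serve` (the `app1`/`app2` panels are hypothetical):
#
#     serve(app1, port=5006, show=False)                    # single app served at /
#     serve({'app': app1, 'app2': app2}, port=5006)         # apps served at /app and /app2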
class ProxyFallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback and
proxies the subpath.
"""
def initialize(self, fallback, proxy=None):
self.fallback = fallback
self.proxy = proxy
def prepare(self):
if self.proxy:
self.request.path = self.request.path.replace(self.proxy, '')
self.fallback(self.request)
self._finished = True
self.on_finish()
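# A minimal sketch of how this handler is wired up for a WSGI app, mirroring the
# Flask branch in `get_server` below (`flask_app` is hypothetical):
#
#     wsgi = WSGIContainer(flask_app)
#     extra_patterns.append(
#         ('^/flask/.*', ProxyFallbackHandler, dict(fallback=wsgi, proxy='/flask/'))
#     )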
def get_static_routes(static_dirs):
"""
Returns a list of tornado routes of StaticFileHandlers given a
dictionary of slugs and file paths to serve.
"""
patterns = []
for slug, path in static_dirs.items():
if not slug.startswith('/'):
slug = '/' + slug
if slug == '/static':
raise ValueError("Static file route may not use /static "
"this is reserved for internal use.")
path = os.path.abspath(path)
if not os.path.isdir(path):
raise ValueError("Cannot serve non-existent path %s" % path)
patterns.append(
(r"%s/(.*)" % slug, StaticFileHandler, {"path": path})
)
return patterns
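# A minimal usage sketch (the paths are hypothetical):
#
#     get_static_routes({'assets': './assets', '/data': '/srv/data'})
#     # -> [('/assets/(.*)', StaticFileHandler, {'path': '<abs path to ./assets>'}),
#     #     ('/data/(.*)', StaticFileHandler, {'path': '/srv/data'})]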
def get_server(panel, port=0, address=None, websocket_origin=None,
loop=None, show=False, start=False, title=None,
verbose=False, location=True, static_dirs={},
oauth_provider=None, oauth_key=None, oauth_secret=None,
oauth_extra_params={}, cookie_secret=None,
oauth_encryption_key=None, session_history=None, **kwargs):
"""
Returns a Server instance with this panel attached as the root
app.
Arguments
---------
panel: Viewable, function or {str: Viewable}
A Panel object, a function returning a Panel object or a
dictionary mapping from the URL slug to either.
port: int (optional, default=0)
Allows specifying a specific port
address : str
The address the server should listen on for HTTP requests.
websocket_origin: str or list(str) (optional)
A list of hosts that can connect to the websocket.
This is typically required when embedding a server app in
an external web site.
If None, "localhost" is used.
loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
The tornado IOLoop to run the Server on.
show : boolean (optional, default=False)
Whether to open the server in a new browser tab on start.
start : boolean (optional, default=False)
Whether to start the Server.
title : str or {str: str} (optional, default=None)
An HTML title for the application or a dictionary mapping
from the URL slug to a customized title.
verbose: boolean (optional, default=False)
Whether to report the address and port.
location : boolean or panel.io.location.Location
Whether to create a Location component to observe and
set the URL location.
static_dirs: dict (optional, default={})
A dictionary of routes and local paths to serve as static file
directories on those routes.
oauth_provider: str
One of the available OAuth providers
oauth_key: str (optional, default=None)
The public OAuth identifier
oauth_secret: str (optional, default=None)
The client secret for the OAuth provider
oauth_extra_params: dict (optional, default={})
Additional information for the OAuth provider
cookie_secret: str (optional, default=None)
A random secret string to sign cookies (required for OAuth)
oauth_encryption_key: str (optional, default=None)
A random encryption key used for encrypting OAuth user
information and access tokens.
session_history: int (optional, default=None)
The amount of session history to accumulate. If set to a non-zero
and non-None value, a REST endpoint is launched at
/rest/session_info, which returns information about the session
history.
kwargs: dict
Additional keyword arguments to pass to Server instance.
Returns
-------
server : bokeh.server.server.Server
Bokeh Server instance running this panel
"""
from ..config import config
from .rest import REST_PROVIDERS
server_id = kwargs.pop('server_id', uuid.uuid4().hex)
kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
if isinstance(panel, dict):
apps = {}
for slug, app in panel.items():
if isinstance(title, dict):
try:
title_ = title[slug]
except KeyError:
raise KeyError(
"Keys of the title dictionnary and of the apps "
f"dictionary must match. No {slug} key found in the "
"title dictionary.")
else:
title_ = title
slug = slug if slug.startswith('/') else '/'+slug
if 'flask' in sys.modules:
from flask import Flask
if isinstance(app, Flask):
wsgi = WSGIContainer(app)
if slug == '/':
raise ValueError('Flask apps must be served on a subpath.')
if not slug.endswith('/'):
slug += '/'
extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
dict(fallback=wsgi, proxy=slug)))
continue
if isinstance(app, pathlib.Path):
app = str(app) # enables serving apps from Paths
if (isinstance(app, str) and (app.endswith(".py") or app.endswith(".ipynb"))
and os.path.isfile(app)):
apps[slug] = build_single_handler_application(app)
else:
handler = FunctionHandler(partial(_eval_panel, app, server_id, title_, location))
apps[slug] = Application(handler)
else:
handler = FunctionHandler(partial(_eval_panel, panel, server_id, title, location))
apps = {'/': Application(handler)}
extra_patterns += get_static_routes(static_dirs)
if session_history is not None:
config.session_history = session_history
if config.session_history != 0:
pattern = REST_PROVIDERS['param']([], 'rest')
extra_patterns.extend(pattern)
state.publish('session_info', state, ['session_info'])
opts = dict(kwargs)
if loop:
loop.make_current()
opts['io_loop'] = loop
elif opts.get('num_procs', 1) == 1:
opts['io_loop'] = IOLoop.current()
if 'index' not in opts:
opts['index'] = INDEX_HTML
if address is not None:
opts['address'] = address
if websocket_origin:
if not isinstance(websocket_origin, list):
websocket_origin = [websocket_origin]
opts['allow_websocket_origin'] = websocket_origin
# Configure OAuth
from ..config import config
if config.oauth_provider:
from ..auth import OAuthProvider
opts['auth_provider'] = OAuthProvider()
if oauth_provider:
config.oauth_provider = oauth_provider
if oauth_key:
config.oauth_key = oauth_key
if oauth_extra_params:
config.oauth_extra_params = oauth_extra_params
if cookie_secret:
config.cookie_secret = cookie_secret
opts['cookie_secret'] = config.cookie_secret
server = Server(apps, port=port, **opts)
if verbose:
address = server.address or 'localhost'
url = f"http://{address}:{server.port}{server.prefix}"
print(f"Launching server at {url}")
state._servers[server_id] = (server, panel, [])
if show:
def show_callback():
server.show('/login' if config.oauth_provider else '/')
server.io_loop.add_callback(show_callback)
def sig_exit(*args, **kwargs):
server.io_loop.add_callback_from_signal(do_stop)
def do_stop(*args, **kwargs):
server.io_loop.stop()
try:
signal.signal(signal.SIGINT, sig_exit)
except ValueError:
pass # Can't use signal on a thread
if start:
server.start()
try:
server.io_loop.start()
except RuntimeError:
pass
return server
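# A minimal usage sketch (the `app` panel is hypothetical): unlike `serve`, this
# only builds the Server instance, so starting it is left to the caller.
#
#     server = get_server(app, port=5006, websocket_origin='*', show=False)
#     server.start()
#     server.io_loop.start()
#
# The OAuth, cookie and session-history keyword arguments are simply forwarded
# onto `config` as shown above before the Server instance is constructed.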
class StoppableThread(threading.Thread):
"""Thread class with a stop() method."""
def __init__(self, io_loop=None, **kwargs):
super().__init__(**kwargs)
self.io_loop = io_loop
def run(self):
if hasattr(self, '_target'):
target, args, kwargs = self._target, self._args, self._kwargs
else:
target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs
if not target:
return
bokeh_server = None
try:
bokeh_server = target(*args, **kwargs)
finally:
if isinstance(bokeh_server, Server):
bokeh_server.stop()
if hasattr(self, '_target'):
del self._target, self._args, self._kwargs
else:
del self._Thread__target, self._Thread__args, self._Thread__kwargs
def stop(self):
self.io_loop.add_callback(self.io_loop.stop)
|
test_program_for_MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3Class.py
|
# -*- coding: utf-8 -*-
'''
Reuben Brewer, Ph.D.
reuben.brewer@gmail.com
www.reubotics.com
Apache 2 License
Software Revision C, 09/03/2021
Verified working on: Python 3 for Windows 8.1 64-bit and Raspberry Pi Buster (no Mac testing yet).
THE SEPARATE-PROCESS-SPAWNING COMPONENT OF THIS CLASS IS NOT AVAILABLE IN PYTHON 2 DUE TO LIMITATION OF
"multiprocessing.set_start_method('spawn')" ONLY BEING AVAILABLE IN PYTHON 3. PLOTTING WITHIN A SINGLE PROCESS STILL WORKS.
'''
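# A minimal sketch of the limitation described above (illustrative only, not part
# of this test program): the 'spawn' start method can only be selected on Python 3.
#
#     import multiprocessing, sys
#     if sys.version_info[0] >= 3:
#         multiprocessing.set_start_method('spawn', force=True)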
__author__ = 'reuben.brewer'
from MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3Class import *
from MyPrint_ReubenPython2and3Class import *
import os, sys, platform
import time, datetime
import threading
import collections
import math, numpy
import traceback
import re
###############
if sys.version_info[0] < 3:
from Tkinter import * #Python 2
import tkFont
import ttk
else:
from tkinter import * #Python 3
import tkinter.font as tkFont #Python 3
from tkinter import ttk
###############
###############
if sys.version_info[0] < 3:
from builtins import raw_input as input
else:
from future.builtins import input as input #"sudo pip3 install future" (Python 3) AND "sudo pip install future" (Python 2)
###############
##########################################################################################################
##########################################################################################################
def getPreciseSecondsTimeStampString():
ts = time.time()
return ts
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TestButtonResponse():
global MyPrint_ReubenPython2and3ClassObject
global USE_MYPRINT_FLAG
if USE_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.my_print("Test Button was Pressed!")
else:
print("Test Button was Pressed!")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_update_clock():
global root
global EXIT_PROGRAM_FLAG
global GUI_RootAfterCallbackInterval_Milliseconds
global USE_GUI_FLAG
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
global MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject
global USE_PLOTTER_FLAG
global PLOTTER_OPEN_FLAG
global SHOW_IN_GUI_PLOTTER_FLAG
global PARENT_GUI_COUNTER
if USE_GUI_FLAG == 1:
if EXIT_PROGRAM_FLAG == 0:
#########################################################
#########################################################
PARENT_GUI_COUNTER = PARENT_GUI_COUNTER + 1
#MyPrint_ReubenPython2and3ClassObject.my_print("PID = " + str(os.getpid()) + ", PARENT_GUI_COUNTER: " + str(PARENT_GUI_COUNTER))
#########################################################
if MYPRINT_OPEN_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.GUI_update_clock()
#########################################################
#########################################################
#if USE_PLOTTER_FLAG == 1 and PLOTTER_OPEN_FLAG == 1 and SHOW_IN_GUI_PLOTTER_FLAG == 1:
#pass #DO NOT CALL MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.GUI_update_clock() as the plotter is firing its own, internal root.after callbacks faster than in this parent root GUI loop.
#########################################################
root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
#########################################################
#########################################################
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ExitProgram_Callback():
global root
global EXIT_PROGRAM_FLAG
global GUI_RootAfterCallbackInterval_Milliseconds
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
print("Exiting all threads in test_program_for_MyPrint_ReubenPython2and3Class.")
EXIT_PROGRAM_FLAG = 1
#########################################################
if MYPRINT_OPEN_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.ExitProgram_Callback()
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_Thread():
global root
global GUI_RootAfterCallbackInterval_Milliseconds
################################################# KEY GUI LINE
#################################################
root = Tk()
#################################################
#################################################
#################################################
TestButton = Button(root, text='Test Button', state="normal", width=20, command=lambda i=1: TestButtonResponse())
TestButton.grid(row=0, column=0, padx=5, pady=1)
#################################################
#################################################
root.protocol("WM_DELETE_WINDOW", ExitProgram_Callback) # Set the callback function for when the window's closed.
root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
root.mainloop()
#################################################
#################################################
root.quit() #Stop the GUI thread, MUST BE CALLED FROM GUI_Thread
root.destroy() #Close down the GUI thread, MUST BE CALLED FROM GUI_Thread
#################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
if __name__ == '__main__':
#################################################
#################################################
global my_platform
if platform.system() == "Linux":
if "raspberrypi" in platform.uname(): # os.uname() doesn't work in windows
my_platform = "pi"
else:
my_platform = "linux"
elif platform.system() == "Windows":
my_platform = "windows"
elif platform.system() == "Darwin":
my_platform = "mac"
else:
my_platform = "other"
print("The OS platform is: " + my_platform)
#################################################
#################################################
################################################
################################################
global USE_PLOTTER_FLAG
USE_PLOTTER_FLAG = 1
global USE_MYPRINT_FLAG
USE_MYPRINT_FLAG = 1
global USE_GUI_FLAG
USE_GUI_FLAG = 1
global USE_SINUSOIDAL_TEST_FLAG
USE_SINUSOIDAL_TEST_FLAG = 1
################################################
################################################
################################################
################################################
global SHOW_IN_GUI_PLOTTER_FLAG
SHOW_IN_GUI_PLOTTER_FLAG = 1
global SHOW_IN_GUI_MYPRINT_FLAG
SHOW_IN_GUI_MYPRINT_FLAG = 1
################################################
################################################
################################################
################################################
global GUI_ROW_MYPRINT
global GUI_COLUMN_MYPRINT
global GUI_PADX_MYPRINT
global GUI_PADY_MYPRINT
global GUI_ROWSPAN_MYPRINT
global GUI_COLUMNSPAN_MYPRINT
GUI_ROW_MYPRINT = 2
GUI_COLUMN_MYPRINT = 0
GUI_PADX_MYPRINT = 1
GUI_PADY_MYPRINT = 10
GUI_ROWSPAN_MYPRINT = 1
GUI_COLUMNSPAN_MYPRINT = 1
#################################################
#################################################
#################################################
#################################################
global EXIT_PROGRAM_FLAG
EXIT_PROGRAM_FLAG = 0
global root
global GUI_RootAfterCallbackInterval_Milliseconds
GUI_RootAfterCallbackInterval_Milliseconds = 30
global MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject
global PLOTTER_OPEN_FLAG
PLOTTER_OPEN_FLAG = -1
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
MYPRINT_OPEN_FLAG = -1
global PARENT_GUI_COUNTER
PARENT_GUI_COUNTER = 0
global CurrentTime_MainLoopThread
CurrentTime_MainLoopThread = -11111.0
global StartingTime_MainLoopThread
StartingTime_MainLoopThread = -11111.0
global SINUSOIDAL_MOTION_INPUT_ROMtestTimeToPeakAngle
SINUSOIDAL_MOTION_INPUT_ROMtestTimeToPeakAngle = 2.0
global SINUSOIDAL_MOTION_INPUT_MinValue
SINUSOIDAL_MOTION_INPUT_MinValue = -50
global SINUSOIDAL_MOTION_INPUT_MaxValue
SINUSOIDAL_MOTION_INPUT_MaxValue = 50
#################################################
#################################################
################################################# KEY GUI LINE
#################################################
if USE_GUI_FLAG == 1:
print("Starting GUI thread...")
GUI_Thread_ThreadingObject = threading.Thread(target=GUI_Thread)
GUI_Thread_ThreadingObject.setDaemon(True) #Should mean that the GUI thread is destroyed automatically when the main thread is destroyed.
GUI_Thread_ThreadingObject.start()
time.sleep(0.5) #Allow enough time for 'root' to be created that we can then pass it into other classes.
else:
root = None
#################################################
#################################################
#################################################
#################################################
global MyPlotterPureTkinter_MostRecentDict
MyPlotterPureTkinter_MostRecentDict = dict()
global MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict_StandAlonePlottingProcess_ReadyForWritingFlag
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict_StandAlonePlottingProcess_ReadyForWritingFlag = -1
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_GUIparametersDict = dict([("EnableInternal_MyPrint_Flag", 1),
("NumberOfPrintLines", 10),
("UseBorderAroundThisGuiObjectFlag", 0),
("GraphCanvasWidth", 1280),
("GraphCanvasHeight", 700),
("GraphCanvasWindowStartingX", 0),
("GraphCanvasWindowStartingY", 0),
("GUI_RootAfterCallbackInterval_Milliseconds_IndependentOfParentRootGUIloopEvents", 20)])
global MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_setup_dict
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_setup_dict = dict([("GUIparametersDict", MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_GUIparametersDict),
("ParentPID", os.getpid()),
("WatchdogTimerDurationSeconds_ExpirationWillEndStandAlonePlottingProcess", 5.0),
("MarkerSize", 3),
("CurvesToPlotNamesAndColorsDictOfLists", dict([("NameList", ["PlotCurve0", "PlotCurve1", "PlotCurve2"]),("ColorList", ["Red", "Green", "Blue"])])),
("NumberOfDataPointToPlot", 25),
("XaxisNumberOfTickMarks", 10),
("YaxisNumberOfTickMarks", 10),
("XaxisNumberOfDecimalPlacesForLabels", 3),
("YaxisNumberOfDecimalPlacesForLabels", 3),
("XaxisAutoscaleFlag", 1),
("YaxisAutoscaleFlag", 1),
("X_min", 0.0),
("X_max", 5.0),
("Y_min", -5.0),
("Y_max", 5.0),
("XaxisDrawnAtBottomOfGraph", 0),
("XaxisLabelString", "Time (sec)"),
("YaxisLabelString", "Y-units (units)"),
("ShowLegendFlag", 1)])
if USE_PLOTTER_FLAG == 1:
try:
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject = MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3Class(MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_setup_dict)
time.sleep(0.25)
PLOTTER_OPEN_FLAG = MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
except:
exceptions = sys.exc_info()[0]
print("MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject, exceptions: %s" % exceptions)
traceback.print_exc()
#################################################
#################################################
#################################################
#################################################
if USE_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_MYPRINT_FLAG),
("root", root),
("UseBorderAroundThisGuiObjectFlag", 0),
("GUI_ROW", GUI_ROW_MYPRINT),
("GUI_COLUMN", GUI_COLUMN_MYPRINT),
("GUI_PADX", GUI_PADX_MYPRINT),
("GUI_PADY", GUI_PADY_MYPRINT),
("GUI_ROWSPAN", GUI_ROWSPAN_MYPRINT),
("GUI_COLUMNSPAN", GUI_COLUMNSPAN_MYPRINT)])
MyPrint_ReubenPython2and3ClassObject_setup_dict = dict([("NumberOfPrintLines", 10),
("WidthOfPrintingLabel", 200),
("PrintToConsoleFlag", 1),
("LogFileNameFullPath", os.getcwd() + "//TestLog.txt"),
("GUIparametersDict", MyPrint_ReubenPython2and3ClassObject_GUIparametersDict)])
try:
MyPrint_ReubenPython2and3ClassObject = MyPrint_ReubenPython2and3Class(MyPrint_ReubenPython2and3ClassObject_setup_dict)
time.sleep(0.25)
MYPRINT_OPEN_FLAG = MyPrint_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
except:
exceptions = sys.exc_info()[0]
print("MyPrint_ReubenPython2and3ClassObject __init__: Exceptions: %s" % exceptions)
traceback.print_exc()
#################################################
#################################################
#################################################
#################################################
if USE_PLOTTER_FLAG == 1 and PLOTTER_OPEN_FLAG != 1:
print("Failed to open MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.")
input("Press any key (and enter) to exit.")
sys.exit()
#################################################
#################################################
#################################################
#################################################
if USE_MYPRINT_FLAG == 1 and MYPRINT_OPEN_FLAG != 1:
print("Failed to open MyPrint_ReubenPython2and3ClassObject.")
input("Press any key (and enter) to exit.")
sys.exit()
#################################################
#################################################
MyPrint_ReubenPython2and3ClassObject.my_print("$$$$$$$$$$$$$$ STARTING MAIN LOOP $$$$$$$$$$$$$$")
StartingTime_MainLoopThread = getPreciseSecondsTimeStampString()
while(EXIT_PROGRAM_FLAG == 0):
#################################################
#################################################
CurrentTime_MainLoopThread = getPreciseSecondsTimeStampString() - StartingTime_MainLoopThread
if CurrentTime_MainLoopThread > 5.0:
EXIT_PROGRAM_FLAG = 1
#################################################
#################################################
#################################################
#################################################
if USE_PLOTTER_FLAG == 1:
#################################################
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict = MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.GetMostRecentDataDict()
#print(str(MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict))
if "StandAlonePlottingProcess_ReadyForWritingFlag" in MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict:
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict_StandAlonePlottingProcess_ReadyForWritingFlag = MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict["StandAlonePlottingProcess_ReadyForWritingFlag"]
else:
pass
#################################################
#################################################
if USE_SINUSOIDAL_TEST_FLAG == 1:
TimeGain = math.pi / (2.0 * SINUSOIDAL_MOTION_INPUT_ROMtestTimeToPeakAngle)
DesiredAngleDeg_1 = 0.5*(SINUSOIDAL_MOTION_INPUT_MaxValue + SINUSOIDAL_MOTION_INPUT_MinValue) + math.exp(0.1*CurrentTime_MainLoopThread)*0.5 * abs(SINUSOIDAL_MOTION_INPUT_MaxValue - SINUSOIDAL_MOTION_INPUT_MinValue) * math.sin(TimeGain * CurrentTime_MainLoopThread) # AUTOMATIC SINUSOIDAL MOVEMENT
DesiredAngleDeg_2 = 0.5*(SINUSOIDAL_MOTION_INPUT_MaxValue + SINUSOIDAL_MOTION_INPUT_MinValue) + math.exp(0.05*CurrentTime_MainLoopThread)*0.5 * abs(SINUSOIDAL_MOTION_INPUT_MaxValue - SINUSOIDAL_MOTION_INPUT_MinValue) * math.cos(TimeGain * CurrentTime_MainLoopThread) # AUTOMATIC SINUSOIDAL MOVEMENT
DesiredAngleDeg_3 = 0.25*(SINUSOIDAL_MOTION_INPUT_MaxValue + SINUSOIDAL_MOTION_INPUT_MinValue) + math.exp(0.03*CurrentTime_MainLoopThread)*0.5 * abs(SINUSOIDAL_MOTION_INPUT_MaxValue - SINUSOIDAL_MOTION_INPUT_MinValue) * math.tan(TimeGain * CurrentTime_MainLoopThread) # AUTOMATIC SINUSOIDAL MOVEMENT
if MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject_MostRecentDict_StandAlonePlottingProcess_ReadyForWritingFlag == 1:
#pass
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.ExternalAddPointOrListOfPointsToPlot("PlotCurve0", CurrentTime_MainLoopThread, DesiredAngleDeg_1)
#MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.ExternalAddPointOrListOfPointsToPlot("PlotCurve1", CurrentTime_MainLoopThread, DesiredAngleDeg_2)
#MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.ExternalAddPointOrListOfPointsToPlot("PlotCurve2", CurrentTime_MainLoopThread, DesiredAngleDeg_3)
time.sleep(0.050)
#################################################
##################################################
##################################################
MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3ClassObject.ExternalSendEndCommandToStandAloneProcess()
ExitProgram_Callback()
print("MAIN LEADER PROGRAM Exiting main program 'test_program_for_MyPlotterPureTkinterStandAloneProcess_ReubenPython2and3Class.")
##########################################################################################################
##########################################################################################################
|
attack.py
|
import time
from multiprocessing import Queue, Process, set_start_method
from victim import *
# Enable this to allow tensors being converted to numpy arrays
tf.compat.v1.enable_eager_execution()
try:
set_start_method('spawn')
except Exception:
pass
logging.basicConfig(level=logging.INFO)
# Silent unimportant log messages
for logger_name in ['transformers.configuration_utils',
'transformers.modeling_utils',
'transformers.tokenization_utils_base', 'absl']:
logging.getLogger(logger_name).setLevel(logging.ERROR)
def attack_process(idx, args, q):
rel2id = json.load(open(args.relation_path, 'r'))
id2rel = {v: k for k, v in rel2id.items()}
total_count, success_count, total_time = 0, 0, 0.0
# Load victim model
# Distribute models on devices equally
device_id = idx % torch.cuda.device_count() # Start from 0
device = torch.device('cuda:' + str(device_id))
model = REClassifier(
args.max_seq_len, args.model_path, rel2id, id2rel, device)
logging.info('Build model ' + str(idx) + ' on device ' + str(device_id))
# Load attacker
logging.info('New Attacker ' + str(idx))
# Preserve special tokens
skip_words = ['unused0', 'unused1', 'unused2', 'unused3']
# Attacker models
attack_models = {
'pw': OpenAttack.attackers.PWWSAttacker,
'tf': OpenAttack.attackers.TextFoolerAttacker,
'hf': OpenAttack.attackers.HotFlipAttacker,
'uat': OpenAttack.attackers.UATAttacker
}
if args.attacker != 'uat':
attacker = attack_models[args.attacker](skip_words=skip_words)
else:
attacker = attack_models[args.attacker]()
# Build evaluation object
options = {"success_rate": False, "fluency": False, "mistake": False, "semantic": False, "levenstein": False,
"word_distance": False, "modification_rate": False, "running_time": False, "progress_bar": False,
"invoke_limit": 500, "average_invoke": True}
attack_eval = OpenAttack.attack_evals.InvokeLimitedAttackEval(
attacker, model, **options)
# Generate samples in batches
while True:
if q.empty():
break
data = q.get()
# Save label for current sample for reference
model.current_label = data.y
start_time = time.time()
adv_data = attack_eval.generate_adv([data])
sample_list = dataset2sample(adv_data, id2rel)
with open(args.output_file, 'a') as f:
for sample in sample_list:
f.write(json.dumps(sample) + '\n')
cost_time = time.time() - start_time
total_time += cost_time
total_count += 1
success_count += len(sample_list)
logging.info('Success:{}/{:02.2f}%, time:{:02.2f}s/{:02.2f}s, jobs:{}/{}'.format(
len(sample_list), success_count / total_count * 100,
cost_time, total_time / total_count,
total_count, q.qsize()))
logging.info('Attacker {} finished and quit.'.format(idx))
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', '-i', type=str, required=True,
help='Path to the input file containing the original dataset')
parser.add_argument('--model_path', '-m', type=str, required=True,
help='Full path for loading weights of model to attack')
parser.add_argument('--relation_path', '-r', type=str, required=True,
help='Full path to json file containing relation to index dict')
parser.add_argument('--attacker', '-a', type=str, choices=['pw', 'tf', 'hf', 'uat'], default='pw',
help='Name of attacker model, pw = PWWS, tf = TextFooler, hf = HotFlip, uat = UAT')
parser.add_argument('--output_file', '-o', type=str, required=True,
help='Where to store the adversarial dataset')
parser.add_argument('--max_seq_len', '-l', type=int, default=128,
help='Maximum sequence length of the BERT model')
parser.add_argument('--num_jobs', '-j', type=int, default=1,
help='Maximum number of parallel workers in attacking')
parser.add_argument('--start_index', '-s', type=int, default=0,
help='Index of sample to start processing, used when you want to restore progress')
args = parser.parse_args()
logging.info('CUDA device status: {}, devices: {}'.format(
torch.cuda.is_available(), torch.cuda.device_count()))
# Load dataset
logging.info('Load dataset')
samples = []
rel2id = json.load(open(args.relation_path, 'r'))
id2rel = {v: k for k, v in rel2id.items()}
with open(args.input_file, 'r') as f:
for line in tqdm(f.readlines(), desc='reading dataset'):
sample = json.loads(line)
samples.append(sample)
dataset = sample2dataset(samples, rel2id)
# Cut dataset into mini-batches, each containing fixed number of samples
logging.info('Creating queue for dataset...')
queue = Queue()
for start_idx in range(args.start_index, len(dataset)):
queue.put(dataset[start_idx])
logging.info('Total tasks: ' + str(queue.qsize()))
# Start attacking
logging.info('Start attack')
if args.num_jobs > 1:
# Multi-process attacking
process_list = []
for index in range(args.num_jobs):
p = Process(target=attack_process, args=(index + 1, args, queue))
process_list.append(p)
p.start()
for p in process_list:
p.join()
else:
# Single-process attacking
attack_process(0, args, queue)
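# Example invocation (all file paths below are hypothetical):
#
#     python attack.py -i data/original.txt -m weights/model.pth \
#         -r data/rel2id.json -o data/adversarial.txt -a pw -j 2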
|
main.py
|
"""PythonHere app."""
# pylint: disable=wrong-import-order,wrong-import-position
from launcher_here import try_startup_script
try:
try_startup_script() # run script entrypoint, if it was passed
except Exception as exc:
startup_script_exception = exc # pylint: disable=invalid-name
else:
startup_script_exception = None # pylint: disable=invalid-name
import asyncio
import os
from pathlib import Path
import sys
import threading
from typing import Any, Dict
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config, ConfigParser
from kivy.logger import Logger
from enum_here import ScreenName, ServerState
from exception_manager_here import install_exception_handler, show_exception_popup
from patches_here import monkeypatch_kivy
from server_here import run_ssh_server
from window_here import reset_window_environment
monkeypatch_kivy()
class PythonHereApp(App):
"""PythonHere main app."""
def __init__(self):
super().__init__()
self.server_task = None
self.settings = None
self.ssh_server_config_ready = asyncio.Event()
self.ssh_server_started = asyncio.Event()
self.ssh_server_connected = asyncio.Event()
self.ssh_server_namespace = {}
self.icon = "data/logo/logo-32.png"
@property
def upload_dir(self) -> str:
"""Path to the directory to use for uploaded data."""
root_dir = Path(self.user_data_dir or ".").resolve()
upload_dir = Path(root_dir) / "upload"
upload_dir.mkdir(exist_ok=True)
return str(upload_dir)
@property
def config_path(self) -> str:
"""Path to the application config file."""
root_dir = Path(self.user_data_dir or ".").resolve()
return str(root_dir / "config.ini")
def load_config(self) -> ConfigParser:
"""Returning the application configuration."""
Config.read(self.config_path) # Override the configuration file location
return super().load_config()
def build(self):
"""Initialize application UI."""
super().build()
install_exception_handler()
self.settings = self.root.ids.settings
self.ssh_server_namespace.update(
{
"app": self,
"root": self.root,
}
)
self.update_server_config_status()
if startup_script_exception:
Clock.schedule_once(
lambda _: show_exception_popup(startup_script_exception), 0
)
def run_app(self):
"""Run application and SSH server tasks."""
self.ssh_server_started = asyncio.Event()
self.server_task = asyncio.ensure_future(run_ssh_server(self))
return asyncio.gather(self.async_run_app(), self.server_task)
async def async_run_app(self):
"""Run app asynchronously."""
try:
await self.async_run(async_lib="asyncio")
Logger.info("PythonHere: async run completed")
except asyncio.CancelledError:
Logger.info("PythonHere: app main task canceled")
except Exception as exc:
Logger.exception(exc)
if self.server_task:
self.server_task.cancel()
if self.get_running_app():
self.stop()
await self.cancel_asyncio_tasks()
async def cancel_asyncio_tasks(self):
"""Cancel all asyncio tasks."""
tasks = [
task for task in asyncio.all_tasks() if task is not asyncio.current_task()
]
if tasks:
for task in tasks:
task.cancel()
await asyncio.wait(tasks, timeout=1)
def update_server_config_status(self):
"""Check and update value of the `ssh_server_config_ready`, update screen."""
def update():
if all(self.get_pythonhere_config().values()):
self.ssh_server_config_ready.set()
screen.update()
screen = self.root.ids.here_screen_manager
screen.current = ServerState.starting_server
self.root.switch_screen(ScreenName.here)
threading.Thread(name="update_server_config_status", target=update).start()
def get_pythonhere_config(self):
"""Return user settings for SSH server."""
return self.settings.get_pythonhere_config()
def update_ssh_server_namespace(self, namespace: Dict[str, Any]):
"""Update SSH server namespace."""
self.ssh_server_namespace.update(namespace)
def on_start(self):
"""App start handler."""
Logger.info("PythonHere: app started")
def on_stop(self):
"""App stop handler."""
Logger.info("PythonHere: app stopped")
def on_pause(self):
"""Pause mode request handler."""
return True
def on_ssh_connection_made(self):
"""New authenticated SSH client connected handler."""
Logger.info("PythonHere: new SSH client connected")
if not self.ssh_server_connected.is_set():
self.ssh_server_connected.set()
Logger.info("PythonHere: reset window environment")
self.ssh_server_namespace["root"] = reset_window_environment()
self.chdir(self.upload_dir)
def chdir(self, path: str):
"""Changes the working directory."""
Logger.info("PythonHere: change working directory to %s", path)
os.chdir(path)
sys.path.insert(0, path)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(PythonHereApp().run_app())
loop.close()
|
session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEqual(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
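  # Feeding a SparseTensor accepts either a plain (indices, values, shape)
  # tuple or a SparseTensorValue; both forms are exercised below.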
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
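  # The next two tests repeat the IndexedSlices fetch/feed checks for slices
  # constructed without a dense_shape (dense_shape is None).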
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
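  # The Extend tests check that ops added to the graph after an earlier
  # sess.run() call are picked up by later runs on the same session.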
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
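  # Helper executed in worker threads by testDefaultGraphWithThreads below;
  # each thread builds ops against its own thread-local default graph.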
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
      self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
      self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
      self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
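  # The runTest* helpers below exercise partial_run; each is invoked against
  # an in-process session (*Direct tests) and a local server (*Dist tests).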
def runTestPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def runTestPartialRunIncomplete(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def runTestConcurrentPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def runTestManyPartialRun(self, sess):
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.multiply(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def runTestRunAndPartialRun(self, sess):
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def runTestPartialRunMissingPlaceholderFeedException(self, sess):
x = array_ops.placeholder(dtypes.float32, shape=())
fetches = [x * 2, x * 3]
handle = sess.partial_run_setup(fetches=fetches, feeds=[])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'You must feed a value for placeholder'):
sess.partial_run(handle, fetches[0])
def testPartialRunDirect(self):
self.runTestPartialRun(session.Session())
def testPartialRunIncompleteDirect(self):
self.runTestPartialRunIncomplete(session.Session())
def testConcurrentPartialRunDirect(self):
self.runTestConcurrentPartialRun(session.Session())
def testManyPartialRunDirect(self):
self.runTestManyPartialRun(session.Session())
def testRunAndPartialRunDirect(self):
self.runTestRunAndPartialRun(session.Session())
def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
self.runTestPartialRunMissingPlaceholderFeedException(session.Session())
def testPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRun(session.Session(server.target))
def testPartialRunIncompleteDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunIncomplete(session.Session(server.target))
def testConcurrentPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestConcurrentPartialRun(session.Session(server.target))
def testManyPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestManyPartialRun(session.Session(server.target))
def testRunAndPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestRunAndPartialRun(session.Session(server.target))
def testPartialRunMissingPlaceholderFeedExceptionDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunMissingPlaceholderFeedException(
session.Session(server.target))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
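  # register_session_run_conversion_functions lets user-defined wrapper types
  # be fetched and fed like tensors; registering the same type a second time
  # must raise ValueError.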
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor,
fetch_fn, feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(squared_tensor, feed_dict={
squared_tensor : np1 * np1})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
if __name__ == '__main__':
googletest.main()