vlc.py
import asynchat
import asyncore
import os
import random
import re
import socket
import subprocess
import sys
import threading
import time
import urllib.error
import urllib.parse
import urllib.request
from syncplay import constants, utils
from syncplay.messages import getMessage
from syncplay.players.basePlayer import BasePlayer
from syncplay.utils import isBSD, isLinux, isWindows, isMacOS
class VlcPlayer(BasePlayer):
speedSupported = True
customOpenDialog = False
chatOSDSupported = False
alertOSDSupported = True
osdMessageSeparator = "; "
RE_ANSWER = re.compile(constants.VLC_ANSWER_REGEX)
SLAVE_ARGS = constants.VLC_SLAVE_ARGS
SLAVE_ARGS.extend(constants.VLC_SLAVE_EXTRA_ARGS)
vlcport = random.randrange(constants.VLC_MIN_PORT, constants.VLC_MAX_PORT) if (constants.VLC_MIN_PORT < constants.VLC_MAX_PORT) else constants.VLC_MIN_PORT
def __init__(self, client, playerPath, filePath, args):
from twisted.internet import reactor
self.reactor = reactor
self._client = client
self._paused = None
self._duration = None
self._filename = None
self._filepath = None
self._filechanged = False
self._lastVLCPositionUpdate = None
self.shownVLCLatencyError = False
self._previousPreviousPosition = -2
self._previousPosition = -1
self._position = 0
try: # Hack to fix locale issue without importing locale library
self.radixChar = "{:n}".format(1.5)[1:2]
if self.radixChar == "" or self.radixChar == "1" or self.radixChar == "5":
raise ValueError
except:
self._client.ui.showErrorMessage(
"Failed to determine locale. As a fallback Syncplay is using the following radix character: \".\".")
self.radixChar = "."
self._durationAsk = threading.Event()
self._filenameAsk = threading.Event()
self._pathAsk = threading.Event()
self._positionAsk = threading.Event()
self._pausedAsk = threading.Event()
self._vlcready = threading.Event()
self._vlcclosed = threading.Event()
self._listener = None
try:
self._listener = self.__Listener(self, playerPath, filePath, args, self._vlcready, self._vlcclosed)
except ValueError:
self._client.ui.showErrorMessage(getMessage("vlc-failed-connection"), True)
self.reactor.callFromThread(self._client.stop, True,)
return
try:
self._listener.setDaemon(True)
self._listener.start()
if not self._vlcready.wait(constants.VLC_OPEN_MAX_WAIT_TIME):
self._vlcready.set()
self._client.ui.showErrorMessage(getMessage("vlc-failed-connection"), True)
self.reactor.callFromThread(self._client.stop, True,)
self.reactor.callFromThread(self._client.initPlayer, self,)
except:
pass
def _fileUpdateClearEvents(self):
self._durationAsk.clear()
self._filenameAsk.clear()
self._pathAsk.clear()
def _fileUpdateWaitEvents(self):
self._durationAsk.wait()
self._filenameAsk.wait()
self._pathAsk.wait()
def _onFileUpdate(self):
self._fileUpdateClearEvents()
self._getFileInfo()
self._fileUpdateWaitEvents()
args = (self._filename, self._duration, self._filepath)
self.reactor.callFromThread(self._client.updateFile, *args)
self.setPaused(self._client.getGlobalPaused())
self.setPosition(self._client.getGlobalPosition())
def askForStatus(self):
self._filechanged = False
self._positionAsk.clear()
self._pausedAsk.clear()
self._listener.sendLine(".")
if self._filename and not self._filechanged:
self._positionAsk.wait(constants.PLAYER_ASK_DELAY)
self._client.updatePlayerStatus(self._paused, self.getCalculatedPosition())
else:
self._client.updatePlayerStatus(self._client.getGlobalPaused(), self._client.getGlobalPosition())
def getCalculatedPosition(self):
if self._lastVLCPositionUpdate is None:
return self._client.getGlobalPosition()
diff = time.time() - self._lastVLCPositionUpdate
if diff > constants.PLAYER_ASK_DELAY and not self._paused:
self._client.ui.showDebugMessage("VLC did not response in time, so assuming position is {} ({}+{})".format(
self._position + diff, self._position, diff))
if diff > constants.VLC_LATENCY_ERROR_THRESHOLD:
if not self.shownVLCLatencyError or constants.DEBUG_MODE:
self._client.ui.showErrorMessage(getMessage("media-player-latency-warning").format(int(diff)))
self.shownVLCLatencyError = True
return self._position + diff
else:
return self._position
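# Illustrative note (not in the upstream code): while playback is running and
# more than constants.PLAYER_ASK_DELAY seconds have passed since VLC's last
# position report, the stored position is extrapolated by the elapsed time,
# e.g. a stored position of 60.0 with 1.5 s elapsed is reported as 61.5;
# otherwise the stored position is returned unchanged.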
def displayMessage(
self, message,
duration=constants.OSD_DURATION * 1000, OSDType=constants.OSD_NOTIFICATION, mood=constants.MESSAGE_NEUTRAL
):
duration /= 1000
if OSDType != constants.OSD_ALERT:
self._listener.sendLine('display-osd: {}, {}, {}'.format('top-right', duration, message))
else:
self._listener.sendLine('display-secondary-osd: {}, {}, {}'.format('center', duration, message))
def setSpeed(self, value):
self._listener.sendLine("set-rate: {:.2n}".format(value))
def setFeatures(self, featureList):
pass
def setPosition(self, value):
self._lastVLCPositionUpdate = time.time()
self._listener.sendLine("set-position: {}".format(value).replace(".", self.radixChar))
def setPaused(self, value):
self._paused = value
if not value:
self._lastVLCPositionUpdate = time.time()
self._listener.sendLine('set-playstate: {}'.format("paused" if value else "playing"))
def getMRL(self, fileURL):
if utils.isURL(fileURL):
fileURL = urllib.parse.quote(fileURL, safe="%/:=&?~#+!$,;'@()*")
return fileURL
fileURL = fileURL.replace('\\', '/')
fileURL = fileURL.encode('utf8')
fileURL = urllib.parse.quote_plus(fileURL)
if isWindows():
fileURL = "file:///" + fileURL
else:
fileURL = "file://" + fileURL
fileURL = fileURL.replace("+", "%20")
return fileURL
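# Illustrative example (hypothetical path; behaviour sketched from the code
# above): on Windows, a local path such as C:\Movies\file name.mkv is switched
# to forward slashes, percent-encoded with quote_plus, prefixed with file:///,
# and the '+' placeholders are rewritten to %20, yielding roughly
# file:///C%3A%2FMovies%2Ffile%20name.mkv. URLs are only percent-quoted with a
# whitelist of reserved characters left intact and returned directly.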
def openFile(self, filePath, resetPosition=False):
if not utils.isURL(filePath):
normedPath = os.path.normpath(filePath)
if os.path.isfile(normedPath):
filePath = normedPath
if utils.isASCII(filePath) and not utils.isURL(filePath):
self._listener.sendLine('load-file: {}'.format(filePath))
else:
fileURL = self.getMRL(filePath)
self._listener.sendLine('load-file: {}'.format(fileURL))
def _getFileInfo(self):
self._listener.sendLine("get-duration")
self._listener.sendLine("get-filepath")
self._listener.sendLine("get-filename")
def lineReceived(self, line):
# try:
line = line.decode('utf-8')
self._client.ui.showDebugMessage("player << {}".format(line))
# except:
# pass
match, name, value = self.RE_ANSWER.match(line), "", ""
if match:
name, value = match.group('command'), match.group('argument')
if line == "filepath-change-notification":
self._filechanged = True
t = threading.Thread(target=self._onFileUpdate)
t.setDaemon(True)
t.start()
elif name == "filepath":
self._filechanged = True
if value == "no-input":
self._filepath = None
else:
if "file://" in value:
value = value.replace("file://", "")
if not os.path.isfile(value):
value = value.lstrip("/")
elif utils.isURL(value):
value = urllib.parse.unquote(value)
# value = value.decode('utf-8')
self._filepath = value
self._pathAsk.set()
elif name == "duration":
if value == "no-input":
self._duration = 0
elif value == "invalid-32-bit-value":
self._duration = 0
self.drop(getMessage("vlc-failed-versioncheck"))
else:
self._duration = float(value.replace(",", "."))
self._durationAsk.set()
elif name == "playstate":
self._paused = (value != 'playing') if (value != "no-input" and not self._filechanged) else self._client.getGlobalPaused()
diff = time.time() - self._lastVLCPositionUpdate if self._lastVLCPositionUpdate else 0
if (
self._paused == False and
self._position == self._previousPreviousPosition and
self._previousPosition == self._position and
self._duration and
self._duration > constants.PLAYLIST_LOAD_NEXT_FILE_MINIMUM_LENGTH and
(self._duration - self._position) < constants.VLC_EOF_DURATION_THRESHOLD and
diff > constants.VLC_LATENCY_ERROR_THRESHOLD
):
self._client.ui.showDebugMessage("Treating 'playing' response as 'paused' due to VLC EOF bug")
self.setPaused(True)
self._pausedAsk.set()
elif name == "position":
newPosition = float(value.replace(",", ".")) if (value != "no-input" and not self._filechanged) else self._client.getGlobalPosition()
if newPosition == self._previousPosition and newPosition != self._duration and self._paused is False:
self._client.ui.showDebugMessage(
"Not considering position {} duplicate as new time because of VLC time precision bug".format(
newPosition))
self._previousPreviousPosition = self._previousPosition
self._previousPosition = self._position
self._positionAsk.set()
return
self._previousPreviousPosition = self._previousPosition
self._previousPosition = self._position
self._position = newPosition
if self._position < 0 and self._duration > 2147 and self._vlcVersion == "3.0.0":
self.drop(getMessage("vlc-failed-versioncheck"))
self._lastVLCPositionUpdate = time.time()
self._positionAsk.set()
elif name == "filename":
self._filechanged = True
self._filename = value
self._filenameAsk.set()
elif line.startswith("vlc-version: "):
self._vlcVersion = line.split(': ')[1].replace(' ', '-').split('-')[0]
if not utils.meetsMinVersion(self._vlcVersion, constants.VLC_MIN_VERSION):
self._client.ui.showErrorMessage(getMessage("vlc-version-mismatch").format(constants.VLC_MIN_VERSION))
self._vlcready.set()
@staticmethod
def run(client, playerPath, filePath, args):
vlc = VlcPlayer(client, VlcPlayer.getExpandedPath(playerPath), filePath, args)
return vlc
@staticmethod
def getDefaultPlayerPathsList():
l = []
for path in constants.VLC_PATHS:
p = VlcPlayer.getExpandedPath(path)
if p:
l.append(p)
return l
@staticmethod
def isValidPlayerPath(path):
if "vlc" in path.lower() and VlcPlayer.getExpandedPath(path):
return True
return False
@staticmethod
def getPlayerPathErrors(playerPath, filePath):
return None
@staticmethod
def getIconPath(path):
return constants.VLC_ICONPATH
@staticmethod
def getExpandedPath(playerPath):
if not os.path.isfile(playerPath):
if os.path.isfile(playerPath + "vlc.exe"):
playerPath += "vlc.exe"
return playerPath
elif os.path.isfile(playerPath + "\\vlc.exe"):
playerPath += "\\vlc.exe"
return playerPath
elif os.path.isfile(playerPath + "VLCPortable.exe"):
playerPath += "VLCPortable.exe"
return playerPath
elif os.path.isfile(playerPath + "\\VLCPortable.exe"):
playerPath += "\\VLCPortable.exe"
return playerPath
if os.access(playerPath, os.X_OK):
return playerPath
for path in os.environ['PATH'].split(':'):
path = os.path.join(os.path.realpath(path), playerPath)
if os.access(path, os.X_OK):
return path
def drop(self, dropErrorMessage=None):
if self._listener:
self._vlcclosed.clear()
self._listener.sendLine('close-vlc')
self._vlcclosed.wait()
self._durationAsk.set()
self._filenameAsk.set()
self._pathAsk.set()
self._positionAsk.set()
self._vlcready.set()
self._pausedAsk.set()
if dropErrorMessage:
self.reactor.callFromThread(self._client.ui.showErrorMessage, dropErrorMessage, True)
self.reactor.callFromThread(self._client.stop, False,)
class __Listener(threading.Thread, asynchat.async_chat):
def __init__(self, playerController, playerPath, filePath, args, vlcReady, vlcClosed):
self.__playerController = playerController
self.requestedVLCVersion = False
self.vlcHasResponded = False
self.oldIntfVersion = None
self.timeVLCLaunched = None
call = [playerPath]
if filePath:
if utils.isASCII(filePath):
call.append(filePath)
else:
call.append(self.__playerController.getMRL(filePath))
if isLinux():
if 'snap' in playerPath:
playerController.vlcIntfPath = '/snap/vlc/current/usr/lib/vlc/lua/intf/'
playerController.vlcIntfUserPath = os.path.join(os.getenv('HOME', '.'), "snap/vlc/current/.local/share/vlc/lua/intf/")
else:
playerController.vlcIntfPath = "/usr/lib/vlc/lua/intf/"
playerController.vlcIntfUserPath = os.path.join(os.getenv('HOME', '.'), ".local/share/vlc/lua/intf/")
elif isMacOS():
playerController.vlcIntfPath = "/Applications/VLC.app/Contents/MacOS/share/lua/intf/"
playerController.vlcIntfUserPath = os.path.join(
os.getenv('HOME', '.'), "Library/Application Support/org.videolan.vlc/lua/intf/")
elif isBSD():
# *BSD ports/pkgs install to /usr/local by default.
# This should also work for all the other BSDs, such as OpenBSD or DragonFly.
playerController.vlcIntfPath = "/usr/local/lib/vlc/lua/intf/"
playerController.vlcIntfUserPath = os.path.join(os.getenv('HOME', '.'), ".local/share/vlc/lua/intf/")
elif "vlcportable.exe" in playerPath.lower():
playerController.vlcIntfPath = os.path.dirname(playerPath).replace("\\", "/") + "/App/vlc/lua/intf/"
playerController.vlcIntfUserPath = playerController.vlcIntfPath
else:
playerController.vlcIntfPath = os.path.dirname(playerPath).replace("\\", "/") + "/lua/intf/"
playerController.vlcIntfUserPath = os.path.join(os.getenv('APPDATA', '.'), "VLC\\lua\\intf\\")
playerController.vlcModulePath = playerController.vlcIntfPath + "modules/?.luac"
def _createIntfFolder(vlcSyncplayInterfaceDir):
self.__playerController._client.ui.showDebugMessage("Checking if syncplay.lua intf directory exists")
from pathlib import Path
if os.path.exists(vlcSyncplayInterfaceDir):
self.__playerController._client.ui.showDebugMessage("Found syncplay.lua intf directory:'{}'".format(vlcSyncplayInterfaceDir))
else:
self.__playerController._client.ui.showDebugMessage("syncplay.lua intf directory not found, so creating directory '{}'".format(vlcSyncplayInterfaceDir))
Path(vlcSyncplayInterfaceDir).mkdir(mode=0o755, parents=True, exist_ok=True)
def _intfNeedsUpdating(vlcSyncplayInterfacePath):
self.__playerController._client.ui.showDebugMessage("Checking if '{}' exists and if it is the expected version".format(vlcSyncplayInterfacePath))
if not os.path.isfile(vlcSyncplayInterfacePath):
self.__playerController._client.ui.showDebugMessage("syncplay.lua not found, so file needs copying")
return True
if os.path.isfile(vlcSyncplayInterfacePath):
with open(vlcSyncplayInterfacePath, 'r') as interfacefile:  # 'rU' mode was deprecated and removed in Python 3.11
for line in interfacefile:
if "local connectorversion" in line:
interface_version = line[26:31]
if interface_version == constants.VLC_INTERFACE_VERSION:
self.__playerController._client.ui.showDebugMessage("syncplay.lua exists and is expected version, so no file needs copying")
return False
else:
self.oldIntfVersion = line[26:31]
self.__playerController._client.ui.showDebugMessage("syncplay.lua is {} but expected version is {} so file needs to be copied".format(interface_version, constants.VLC_INTERFACE_VERSION))
return True
self.__playerController._client.ui.showDebugMessage("Up-to-dateness checks failed, so copy the file.")
return True
if _intfNeedsUpdating(os.path.join(playerController.vlcIntfUserPath, "syncplay.lua")):
try:
_createIntfFolder(playerController.vlcIntfUserPath)
copyFrom = utils.findResourcePath("syncplay.lua")
copyTo = os.path.join(playerController.vlcIntfUserPath, "syncplay.lua")
self.__playerController._client.ui.showDebugMessage("Copying VLC Lua Interface from '{}' to '{}'".format(copyForm, copyTo))
import shutil
if os.path.exists(copyTo):
os.chmod(copyTo, 0o755)
shutil.copyfile(copyFrom, copyTo)
os.chmod(copyTo, 0o755)
except Exception as e:
playerController._client.ui.showErrorMessage(e)
return
if isLinux():
playerController.vlcDataPath = "/usr/lib/syncplay/resources"
else:
playerController.vlcDataPath = utils.findWorkingDir() + "\\resources"
playerController.SLAVE_ARGS.append(
'--lua-config=syncplay={{modulepath=\"{}\",port=\"{}\"}}'.format(
playerController.vlcModulePath, str(playerController.vlcport)))
call.extend(playerController.SLAVE_ARGS)
if args:
call.extend(args)
self._vlcready = vlcReady
self._vlcclosed = vlcClosed
self._vlcVersion = None
if isWindows() and getattr(sys, 'frozen', '') and getattr(sys, '_MEIPASS', '') is not None: # Needed for pyinstaller --onefile bundle
self.__process = subprocess.Popen(
call, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
shell=False, creationflags=0x08000000)
else:
self.__process = subprocess.Popen(call, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.timeVLCLaunched = time.time()
if self._shouldListenForSTDOUT():
for line in iter(self.__process.stderr.readline, b''):  # stderr is a bytes stream, so the EOF sentinel must be b''
line = line.decode('utf-8')
self.vlcHasResponded = True
self.timeVLCLaunched = None
if "[syncplay]" in line:
if "Listening on host" in line:
break
if "Hosting Syncplay" in line:
break
elif "Couldn't find lua interface" in line:
playerController._client.ui.showErrorMessage(
getMessage("vlc-failed-noscript").format(line), True)
break
elif "lua interface error" in line:
playerController._client.ui.showErrorMessage(
getMessage("media-player-error").format(line), True)
break
if not isMacOS():
self.__process.stderr = None
else:
vlcoutputthread = threading.Thread(target=self.handle_vlcoutput, args=())
vlcoutputthread.setDaemon(True)
vlcoutputthread.start()
threading.Thread.__init__(self, name="VLC Listener")
asynchat.async_chat.__init__(self)
self.set_terminator(b'\n')
self._ibuffer = []
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self._sendingData = threading.Lock()
def _shouldListenForSTDOUT(self):
return not isWindows()
def initiate_send(self):
with self._sendingData:
asynchat.async_chat.initiate_send(self)
def run(self):
self._vlcready.clear()
self.connect(('localhost', self.__playerController.vlcport))
asyncore.loop()
def handle_connect(self):
asynchat.async_chat.handle_connect(self)
self._vlcready.set()
self.timeVLCLaunched = None
def collect_incoming_data(self, data):
self._ibuffer.append(data)
def handle_close(self):
if self.timeVLCLaunched and time.time() - self.timeVLCLaunched < constants.VLC_OPEN_MAX_WAIT_TIME:
try:
self.__playerController._client.ui.showDebugMessage("Failed to connect to VLC, but reconnecting as within max wait time")
except:
pass
self.run()
elif self.vlcHasResponded:
asynchat.async_chat.handle_close(self)
self.__playerController.drop()
else:
self.vlcHasResponded = True
asynchat.async_chat.handle_close(self)
self.__playerController.drop(getMessage("vlc-failed-connection").format(constants.VLC_MIN_VERSION))
def handle_vlcoutput(self):
out = self.__process.stderr
for line in iter(out.readline, b''):  # bytes stream, so use b'' as the EOF sentinel
line = line.decode('utf-8')
if '[syncplay] core interface debug: removing module' in line:
self.__playerController.drop()
break
out.close()
def found_terminator(self):
self.vlcHasResponded = True
self.__playerController.lineReceived(b"".join(self._ibuffer))
self._ibuffer = []
def sendLine(self, line):
if self.connected:
if not self.requestedVLCVersion:
self.requestedVLCVersion = True
self.sendLine("get-vlc-version")
# try:
lineToSend = line + "\n"
self.push(lineToSend.encode('utf-8'))
if self.__playerController._client and self.__playerController._client.ui:
self.__playerController._client.ui.showDebugMessage("player >> {}".format(line))
# except:
# pass
if line == "close-vlc":
self._vlcclosed.set()
if not self.connected and not self.timeVLCLaunched:
# For circumstances where Syncplay is not connected to VLC and is not reconnecting
try:
self.__process.terminate()
except: # When VLC is already closed
pass
kahelo.py
from __future__ import print_function
import sys
import os
import re
import math
import argparse
import webbrowser
import itertools
import random
import threading
if sys.version_info < (3,):
import StringIO
else:
import io
try:
import sqlite3
sqlite3_available = True
except:
sqlite3_available = False
try:
import xml.etree.cElementTree as ET
except:
import xml.etree.ElementTree as ET
import six.moves.urllib.request as requests
import six.moves.urllib.error as urllib_error
import six.moves.configparser as configparser
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from PIL import Image, ImageDraw
from time import time, sleep, strftime, gmtime
IDENTITY = """\
kahelo - tile management for GPS maps - kahelo.godrago.net\
"""
VERSION = '1.00'
LICENSE = """\
Copyright (c) 2014 Gilles Arcas-Luque (gilles dot arcas at gmail dot com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# -- Constants ---------------------------------------------------------------
APPNAME = 'kahelo'
MAXZOOM = 18
# -- Command line parsing ----------------------------------------------------
USAGE = """
-describe <db name> [-db_format <db format>] [-tile_format <tile format>] [-url_template <url template>]
-insert <db name> <tileset> [-force]
-import <db name> <tileset> [-force] -source <db name>
-export <db name> <tileset> [-force] -dest <db name>
-delete <db name> <tileset>
-view <db name> <tileset> [-image <image name>]
-count <db name> <tileset>
-stat <db name> <tileset>
-server <db name>
tileset:
-track <track_filename> -zoom <zoom_level> [-radius <in kilometers>]
-tracks <track_filename> -zoom <zoom_level> [-radius <in kilometers>]
-contour <track_filename> -zoom <zoom_level> [-radius <in kilometers>]
-contours <track_filename> -zoom <zoom_level> [-radius <in kilometers>]
-project <project_filename>
-records [-zoom <zoom_level>]
-tiles xmin,ymin,xmax,ymax -zoom <zoom_level>
-inside limits tilesets to the intersection with the argument database
-zoom 1-14,16/12 zoom levels 1 to 14 and 16, level 12 subdivided into the higher levels
url template examples:
OpenStreetMap: http://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png
may be abbreviated as OpenStreetMap
MapQuest: http://otile[1234].mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.jpg
may be abbreviated as MapQuest
full help:
APPNAME.html\
"""
class ArgumentParser(argparse.ArgumentParser):
def __init__(self):
usage = USAGE.replace('APPNAME', APPNAME)
argparse.ArgumentParser.__init__(self, usage=usage, add_help=False)
group = self.add_argument_group('Information')
group.add_argument('-version', action='store_true', help='print version number', dest='do_version')
group.add_argument('-license', action='store_true', help='display text of license', dest='do_license')
group.add_argument('-help', action='store_true', help='show this help message', dest='do_help')
group.add_argument('-Help', action='store_true', help='open html help page', dest='do_helphtml')
group.add_argument('-verbose', action='store_true', help='detailed feedback', dest='verbose')
group.add_argument('-quiet', action='store_true', help='minimal feedback', dest='quiet')
agroup = self.add_argument_group('Commands')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument('-describe', metavar='db_name', action='store', dest='db_describe', help='set database properties')
xgroup.add_argument('-insert', metavar='db_name', action='store', dest='db_insert', help='download and insert tiles in database')
xgroup.add_argument('-import', metavar='db_name', action='store', dest='db_import', help='import tiles')
xgroup.add_argument('-export', metavar='db_name', action='store', dest='db_export', help='export tiles')
xgroup.add_argument('-delete', metavar='db_name', action='store', dest='db_delete', help='delete tiles')
xgroup.add_argument('-count', metavar='db_name', action='store', dest='db_count' , help='count tiles')
xgroup.add_argument('-view', metavar='db_name', action='store', dest='db_view' , help='make an image from tiles')
xgroup.add_argument('-server', metavar='db_name', action='store', dest='db_server', help='connect to database through http')
xgroup.add_argument('-stat', metavar='db_name', action='store', dest='db_stat' , help='statistics')
agroup = self.add_argument_group('Database properties')
if sqlite3_available:
db_ids = ('maverick', 'folder', 'rmaps', 'kahelo')
else:
db_ids = ('maverick', 'folder')
img_ids = ('png', 'jpg', 'server')
agroup.add_argument('-db_format' , action='store', dest='db_format', choices=db_ids)
agroup.add_argument('-tile_format' , action='store', dest='tile_format', choices=img_ids)
agroup.add_argument('-url_template', action='store', dest='url_template', help='url template for tile server')
agroup = self.add_argument_group('Tile database source and destination')
agroup.add_argument('-source' , metavar='db_name', action='store', dest='db_source', help='source database')
agroup.add_argument('-destination', metavar='db_name', action='store', dest='db_dest' , help='destination database')
agroup = self.add_argument_group('Tile source')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument('-track' , action='store', dest='track', help='track filename')
xgroup.add_argument('-tracks' , action='store', dest='tracks', help='track filename')
xgroup.add_argument('-contour' , action='store', dest='contour', help='contour filename')
xgroup.add_argument('-contours', action='store', dest='contours', help='contour filename')
xgroup.add_argument('-project' , action='store', dest='project', help='project filename')
xgroup.add_argument('-records' , action='store_true', dest='db_tiles', help='tiles from database')
xgroup.add_argument('-tiles' , action='store', dest='coord_tiles', help='tile coordinates')
agroup.add_argument('-zoom' , action='store', dest='zoom', help='zoom 0-%d' % MAXZOOM)
agroup.add_argument('-radius' , action='store', dest='radius', help='include disk radius in km')
agroup.add_argument('-inside' , action='store_true', dest='inside', help='limit tilesets to intersection with database')
agroup = self.add_argument_group('Other parameters')
agroup.add_argument('-force' , action='store_true', dest='force_insert', help='force insertion into database')
agroup.add_argument('-image' , action='store', dest='image', help='name of output image')
def error(self, message):
error(message)
def parse_args(self, argstring=None):
if argstring is None:
options = argparse.ArgumentParser.parse_args(self)
else:
options = argparse.ArgumentParser.parse_args(self, argstring.split())
# nothing more to do if help or version
if options.do_version or options.do_license or options.do_help or options.do_helphtml:
return options
# upper case constant argument values
if options.db_format is not None:
options.db_format = options.db_format.upper()
if options.tile_format is not None:
options.tile_format = options.tile_format.upper()
# add db_name attribute
options.db_name = (options.db_describe or options.db_count or
options.db_insert or options.db_import or
options.db_export or options.db_delete or
options.db_view or options.db_stat or
options.db_server or None)
# expand url aliases
if options.url_template == 'OpenStreetMap':
options.url_template = r'http://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png'
if options.url_template == 'MapQuest':
options.url_template = r'http://otile[1234].mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.jpg'
# nothing more to do for -describe or -server
if options.db_describe or options.db_server:
return options
complete_source(options)
return options
def complete_source(options):
# set tile generator and tile origin
if options.track:
options.tile_generator, options.tile_source = tile_track_generator, options.track
elif options.tracks:
options.tile_generator, options.tile_source = tile_tracks_generator, options.tracks
elif options.contour:
options.tile_generator, options.tile_source = tile_contour_generator, options.contour
elif options.contours:
options.tile_generator, options.tile_source = tile_contours_generator, options.contours
elif options.project:
options.tile_generator, options.tile_source = tile_project_generator, options.project
elif options.db_tiles:
options.tile_generator, options.tile_source = db_tiles_generator, None
elif options.coord_tiles:
options.tile_generator, options.tile_source = coord_tiles_generator, options.coord_tiles
else:
error('source is missing ')
# replace tile coordinate string with integer coordinates
if options.coord_tiles:
try:
options.coord_tiles = [int(x) for x in options.coord_tiles.split(',')]
except:
error('incorrect tile rectangle coordinates (xmin,ymin,xmax,ymax)')
# replace zoom string with list of zoom values
if options.zoom is None:
if options.project:
options.zoom = list(range(MAXZOOM + 1))
elif options.db_tiles:
options.zoom = list(range(MAXZOOM + 1))
else:
error('zoom must be given')
else:
options.zoom, options.zoom_limit = decode_range_ex(options.zoom)
if options.zoom is None or not all(0 <= n <= MAXZOOM for n in options.zoom):
error('zoom values must be integers between 0 and %d' % MAXZOOM)
if (options.zoom_limit is None or not (0 <= options.zoom_limit <= MAXZOOM
or options.zoom_limit == 1000)):
error('zoom limit must be an integer between 0 and %d' % MAXZOOM)
# convert radius argument to float
if options.radius is None:
pass
else:
try:
options.radius = float(options.radius)
except:
error('radius must be a positive number')
if options.radius < 0:
error('radius must be a positive number')
# used to find gpx files in path of project
options.project_filename = None
class ProjectParser(argparse.ArgumentParser):
def __init__(self):
argparse.ArgumentParser.__init__(self)
group = self.add_mutually_exclusive_group()
group.add_argument('-track' , action='store', dest='track')
group.add_argument('-tracks' , action='store', dest='tracks')
group.add_argument('-contour' , action='store', dest='contour')
group.add_argument('-contours', action='store', dest='contours')
group.add_argument('-project' , action='store', dest='project')
group.add_argument('-records' , action='store_true', dest='db_tiles')
group.add_argument('-tiles' , action='store', dest='coord_tiles')
self.add_argument('-zoom' , action='store', dest='zoom')
self.add_argument('-radius' , action='store', dest='radius')
self.add_argument('-inside' , action='store_true', dest='inside')
def error(self, msg):
error('incorrect project syntax: ' + msg)
def parse_args(self, arglist):
options = argparse.ArgumentParser.parse_args(self, arglist)
complete_source(options)
return options
def decode_range(s):
"""Decode a range string into a list of integers: 8-10,12,14 --> [8, 9, 10, 12, 14]"""
R = []
for x in s.split(','):
m = re.search(r'(\d+)-(\d+)', x)
if m:
i1 = int(m.group(1))
i2 = int(m.group(2))
R.extend(list(range(i1, i2 + 1)))
elif x.isdigit():
R.append(int(x))
else:
return None
return R
def decode_range_ex(s):
"""Decode a zoom argument: 8-10,12,14/12 --> [8, 9, 10, 12, 14], 12"""
if '/' not in s:
return decode_range(s), 1000
else:
zoom_range, zoom_limit = s.split('/')
dec_range = decode_range(zoom_range)
dec_limit = int(zoom_limit) if zoom_limit.isdigit() else None
return dec_range, dec_limit
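# Illustrative doctest-style examples, consistent with the docstrings above:
#   >>> decode_range('8-10,12,14')
#   [8, 9, 10, 12, 14]
#   >>> decode_range_ex('8-10,12,14/12')
#   ([8, 9, 10, 12, 14], 12)
#   >>> decode_range_ex('5')  # no '/': the zoom limit defaults to 1000
#   ([5], 1000)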
def options_generate(options):
return options.tile_generator, options.tile_source, options.zoom, options.radius
def default_radius(x, y, zoom):
radius_tu = 0.5
radius_km = tile_distance_km(x, y, x + radius_tu, y, zoom)
return radius_km
# -- Advanced settings from configuration files ------------------------------
# The following string is used to create the configuration file.
# It gives the default values for the advanced settings.
DEFAULTS = \
"""
[database]
tile_validity = 3650 ; number of days, 0 to ignore
commit_period = 100
[insert]
request_delay = 0.05 ; seconds
timeout = 3 ; seconds
number_of_attempts = 3
session_max = 1000000
[import/export]
draw_tile_limits = False ; True or False
draw_tile_width = False ; True or False
[view]
max_dim = 10000 ; pixels
antialias = False ; True (slower, better quality) or False
draw_upper_tiles = False ; True or False
draw_tile_limits = True ; True or False
draw_tile_width = False ; True or False
draw_tracks = True ; True or False
draw_points = False ; True or False
draw_circles = False ; True or False
[tiles]
jpeg_quality = 85 ; 1 (very poor) to 100 (lossless)
background_color = 32 32 32 ; RGB
missing_tile_color = 128 128 128 ; RGB
border_valid_color = 255 255 255 128 ; RGBA
border_expired_color = 255 0 0 192 ; RGBA
track_color = 255 0 0 2 ; RGBW (width)
[server]
port = 80
"""
DEFAULTS_ADVANCED = \
"""
[tracks]
interpolate_points = True
[view]
true_tiles = True
interpolated_points = False
[tiles]
ghost_tile_color = 64 64 64
"""
class KaheloConfigParser (configparser.ConfigParser):
"""Add input checking."""
def __init__(self):
if sys.version_info < (3,):
configparser.ConfigParser.__init__(self)
else:
configparser.ConfigParser.__init__(self, inline_comment_prefixes=(';',))
def error(self, section, entry):
error('missing or incorrect config value: [%s]%s' % (section, entry))
def getint(self, section, entry):
try:
return configparser.ConfigParser.getint(self, section, entry)
except Exception as e:
print(e)
self.error(section, entry)
def getboolean(self, section, entry):
try:
return configparser.ConfigParser.getboolean(self, section, entry)
except Exception as e:
print(e)
self.error(section, entry)
def getcolor(self, section, entry, n):
try:
s = configparser.ConfigParser.get(self, section, entry)
x = tuple([int(x) for x in s.split()])
except:
self.error(section, entry)
if len(x) == n:
return x
else:
self.error(section, entry)
def configfilename():
if __name__ == "__main__":
name = sys.argv[0]
else:
name = __file__
return os.path.splitext(name)[0] + '.config'
def createconfig(config_filename, defaults):
with open(config_filename, 'wt') as f:
f.writelines(defaults)
def read_config(options):
config_filename = configfilename()
advanced_config_filename = config_filename + '.advanced'
try:
if not os.path.exists(config_filename):
createconfig(config_filename, DEFAULTS)
if not os.path.exists(advanced_config_filename):
createconfig(advanced_config_filename, DEFAULTS_ADVANCED)
except:
error('error creating configuration file')
try:
getconfig(options, config_filename, advanced_config_filename)
except CustomException:
raise
except Exception as e:
error('error reading configuration file: ' + str(e))
def resetconfig():
createconfig(configfilename(), DEFAULTS)
def setconfig(section, key, value):
config = KaheloConfigParser()
config.read(configfilename())
config.set(section, key, value)
with open(configfilename(), 'wt') as configfile:
config.write(configfile)
def getconfig(options, config_filename, advanced_config_filename):
class SubOptions: pass
options.database = SubOptions()
options.insert = SubOptions()
options.Import = SubOptions() # import is reserved
options.view = SubOptions()
options.tiles = SubOptions()
options.server = SubOptions()
options.Tracks = SubOptions() # tracks is used for tileset
config = KaheloConfigParser()
config.read(config_filename)
# [database]
options.database.tile_validity = config.getint('database', 'tile_validity')
options.database.commit_period = config.getint('database', 'commit_period')
# [insert]
options.insert.request_delay = config.getfloat('insert', 'request_delay')
options.insert.timeout = config.getfloat('insert', 'timeout')
options.insert.number_of_attempts = config.getint('insert', 'number_of_attempts')
options.insert.session_max = config.getint('insert', 'session_max')
# [import/export]
options.Import.draw_tile_limits = config.getboolean('import/export', 'draw_tile_limits')
options.Import.draw_tile_width = config.getboolean('import/export', 'draw_tile_width')
# [view]
options.view.max_dim = config.getint('view', 'max_dim')
options.view.antialias = config.getboolean('view', 'antialias')
options.view.draw_upper_tiles = config.getboolean('view', 'draw_upper_tiles')
options.view.draw_tile_limits = config.getboolean('view', 'draw_tile_limits')
options.view.draw_tile_width = config.getboolean('view', 'draw_tile_width')
options.view.draw_tracks = config.getboolean('view', 'draw_tracks')
options.view.draw_points = config.getboolean('view', 'draw_points')
options.view.draw_circles = config.getboolean('view', 'draw_circles')
# [tiles]
options.tiles.jpeg_quality = config.getint('tiles', 'jpeg_quality')
options.tiles.background_color = config.getcolor('tiles', 'background_color', 3)
options.tiles.missing_tile_color = config.getcolor('tiles', 'missing_tile_color', 3)
options.tiles.border_valid_color = config.getcolor('tiles', 'border_valid_color', 4)
options.tiles.border_expired_color = config.getcolor('tiles', 'border_expired_color', 4)
options.tiles.track_color = config.getcolor('tiles', 'track_color', 4)
# [server]
options.server.port = config.getint('server', 'port')
# advanced parameters
config.read(advanced_config_filename)
# [tracks]
options.Tracks.interpolate_points = config.getboolean('tracks', 'interpolate_points')
# [view]
options.view.true_tiles = config.getboolean('view', 'true_tiles')
options.view.interpolated_points = config.getboolean('view', 'interpolated_points')
# [tiles]
options.tiles.ghost_tile_color = config.getcolor('tiles', 'ghost_tile_color', 3)
today = int(math.floor(time()))
validity = options.database.tile_validity * (3600 * 24)
options.database.expiry_date = today - validity
# -- Error handling ----------------------------------------------------------
class CustomException(Exception):
pass
def error(msg):
print(APPNAME, 'error:', msg)
print('-help or -h for more information')
raise CustomException()
# -- Command dispatcher ------------------------------------------------------
def apply_command(options):
if options.do_version:
print_version()
elif options.do_license:
print_license()
elif options.do_help:
print_help()
elif options.do_helphtml:
do_helphtml()
elif options.db_describe:
do_describe(options.db_name, options)
elif options.db_count:
return do_count(options.db_name, options)
elif options.db_insert:
do_insert(options.db_name, options)
elif options.db_import:
do_import(options.db_name, options)
elif options.db_export:
do_export(options.db_name, options)
elif options.db_delete:
do_delete(options.db_name, options)
elif options.db_view:
do_makeview(options.db_name, options)
elif options.db_server:
do_server(options.db_name, options)
elif options.db_stat:
do_statistics(options.db_name, options)
else:
error('no command given')
# -- Conversions between tile units and latitude/longitude -------------------
EARTH_RADIUS = 6371
def deg2tilecoord(lat_deg, lon_deg, zoom):
"""
Convert latitude,longitude coordinates in degrees into tile coordinates for
given zoom.
"""
try:
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = (lon_deg + 180.0) / 360.0 * n
ytile = (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n
return xtile, ytile
except:
error('error converting (%.4f, %.4f, %d) to tile' % (lat_deg, lon_deg, zoom))
def deg2tile(lat_deg, lon_deg, zoom):
"""
Convert latitude,longitude coordinates in degrees into tile units (rounded)
for given zoom.
"""
xtile, ytile = deg2tilecoord(lat_deg, lon_deg, zoom)
return int(xtile), int(ytile)
def tile2deg(xtile, ytile, zoom):
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return lat_deg, lon_deg
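# Sanity check (illustrative): the slippy-map formulas above map the
# equator/prime meridian to the centre of the single zoom-0 tile, and
# tile2deg is the inverse of deg2tilecoord:
#   >>> deg2tilecoord(0.0, 0.0, 0)
#   (0.5, 0.5)
#   >>> tile2deg(0.5, 0.5, 0)
#   (0.0, 0.0)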
def sqr(x):
return x * x
def asinx(x):
# clamp the parameter to [-1, 1]: rounding errors in the caller's computation can push it slightly out of range
x = -1 if x < -1 else 1 if x > 1 else x
return math.asin(x)
def haversine_distance(lat1, lon1, lat2, lon2):
# coordinates in degrees, result in kilometer
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
a = sqr(math.sin((lat1 - lat2) / 2)) + sqr(math.sin((lon1 - lon2) / 2)) * math.cos(lat1) * math.cos(lat2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = EARTH_RADIUS * c
return d
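# Quick check (illustrative): one degree of longitude at the equator is about
# 2 * pi * EARTH_RADIUS / 360, i.e. roughly 111.2 km:
#   >>> round(haversine_distance(0, 0, 0, 1), 1)
#   111.2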
def shift_longitude(lat, lon, d):
# coordinates and result in degrees, d in kilometer
lat = math.radians(lat)
lon = math.radians(lon)
return math.degrees(lat), math.degrees(lon - 2 * asinx(math.sin(d / 2.0 / EARTH_RADIUS) / math.cos(lat)))
def shift_latitude(lat, lon, d):
# coordinates and result in degrees, d in kilometer
lat = math.radians(lat)
lon = math.radians(lon)
return math.degrees(lat - d / EARTH_RADIUS), math.degrees(lon)
def tile_shift_longitude(x, y, zoom, d):
# x, y and result in tile units, d in kilometer
lat, lon = tile2deg(x, y, zoom)
lat2, lon2 = shift_longitude(lat, lon, d)
return deg2tilecoord(lat2, lon2, zoom)
def tile_shift_latitude(x, y, zoom, d):
# x, y and result in tile units, d in kilometer
lat, lon = tile2deg(x, y, zoom)
lat2, lon2 = shift_latitude(lat, lon, d)
return deg2tilecoord(lat2, lon2, zoom)
def tile_distance_km(x1, y1, x2, y2, zoom):
# x1, y1, x2, y2 in tile units, result in kilometer
lat1, lon1 = tile2deg(x1, y1, zoom)
lat2, lon2 = tile2deg(x2, y2, zoom)
d = haversine_distance(lat1, lon1, lat2, lon2)
return d
def tile_hdistance_tu(x, y, zoom, d):
# x, y in tile units, d in kilometer, result in tile units
x2, y2 = tile_shift_longitude(x, y, zoom, d)
d = abs(x - x2)
return d
# -- Tile utilities ----------------------------------------------------------
def binding_box(tiles):
xmin = 1000000000
xmax = 0
ymin = 1000000000
ymax = 0
for tile in tiles:
x, y = tile[0], tile[1] # work for (x,y) or (x,y,z)
if x < xmin:
xmin = x
if x > xmax:
xmax = x
if y < ymin:
ymin = y
if y > ymax:
ymax = y
return xmin, ymin, xmax, ymax
def interior(tiles):
xmin, ymin, xmax, ymax = binding_box(tiles)
map = dict()
for x in range(xmin, xmax + 1):
map[x] = dict()
for y in range(ymin, ymax + 1):
map[x][y] = 0
for x, y in tiles:
map[x][y] = 1
stack = []
for x in range(xmin, xmax + 1):
stack.extend(((x, ymin), (x, ymax)))
for y in range(ymin, ymax + 1):
stack.extend(((xmin, y), (xmax, y)))
while len(stack) > 1:
x, y = stack.pop()
if xmin <= x <= xmax and ymin <= y <= ymax:
if map[x][y] == 0:
map[x][y] = 2
stack.extend(((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)))
res = []
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
if map[x][y] <= 1:
res.append((x, y))
return res
def interpolate_points(tile_points):
# tile_points is a list of points in tile units
# adds points at integer coordinates
tiles = set()
for index, tile_point in enumerate(tile_points[:-1]):
x1, y1 = tile_point
x2, y2 = tile_points[index + 1]
tiles.add((x1, y1))
if int(x1) == int(x2) and int(y1) == int(y2):
continue
if x1 == x2:
Y1, Y2 = sorted((y1, y2))
for y in range(int(Y1) + 1, int(Y2)):
tiles.add((x1, y))
else:
a = float(y2 - y1) / (x2 - x1)
b = y1 - a * x1
if abs(x2 - x1) > abs(y2 - y1):
X1, X2 = sorted((x1, x2))
for x in range(int(X1) + 1, int(X2) + 1):
tiles.add((x, a * x + b))
else:
Y1, Y2 = sorted((y1, y2))
for y in range(int(Y1) + 1, int(Y2) + 1):
tiles.add(((y - b) / a, y))
tiles.add((x2, y2))
return list(tiles)
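# Illustrative example: a horizontal segment from (0.5, 0.5) to (3.5, 0.5)
# gains points at the intermediate integer x values so no tile column is
# skipped:
#   >>> sorted(interpolate_points([(0.5, 0.5), (3.5, 0.5)]))
#   [(0.5, 0.5), (1, 0.5), (2, 0.5), (3, 0.5), (3.5, 0.5)]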
def circle_tiles(x, y, zoom, radius_km, tiles):
# x, y tile coordinates, radius in km
radius_tu = tile_hdistance_tu(x, y, zoom, radius_km)
x0 = x - radius_tu
x1 = x + radius_tu
tiles.add((int(x0), int(y)))
tiles.add((int(x), int(y + radius_tu)))
tiles.add((int(x), int(y - radius_tu)))
for xt in range(int(x0) + 1, int(x1) + 1):
h = math.sqrt(sqr(radius_tu) - sqr(xt - x))
y0 = y - h
y1 = y + h
for yt in range(int(y0), int(y1) + 1):
tiles.add((xt, yt))
tiles.add((xt - 1, yt))
def expand_tiles(segments, options, zoom, radius_km):
tiles = set()
if radius_km is None:
x, y = segments[0][0]
radius_km = default_radius(x, y, zoom)
for segment in segments:
if options.Tracks.interpolate_points is False:
tilelist = segment
else:
tilelist = interpolate_points(segment)
for x, y in tilelist:
if radius_km == 0:
tiles.add((int(x), int(y)))
else:
circle_tiles(x, y, zoom, radius_km, tiles)
tilemin = 0
tilemax = 2 ** zoom - 1
return list([(x, y) for x, y in tiles if tilemin <= x <= tilemax and tilemin <= y <= tilemax])
# -- Parsing gpx files -------------------------------------------------------
# cache for gpx trees as parsing is expensive
GpxCache = dict()
def namespace(root):
# http://www.topografix.com/GPX/1/0
# http://www.topografix.com/GPX/1/1
return root.tag[1:-4]
def read_gpx(gpx_filename):
# read a gpx file as a list of tracks
# read a track as a list of segments
# read a segment as a list of points
# read a point as a couple of floats (lat, lon)
global GpxCache
if gpx_filename in GpxCache:
return GpxCache[gpx_filename]
try:
tree = ET.parse(gpx_filename)
except IOError:
error('error reading ' + gpx_filename)
except ET.ParseError:
error('error parsing ' + gpx_filename)
root = tree.getroot()
xmlns = namespace(root)
def ns_tag(tag):
if xmlns == '':
return tag
else:
return str(ET.QName(xmlns, tag))
trklist = []
for trk in root.findall(ns_tag('trk')):
seglist = []
for seg in trk.findall(ns_tag('trkseg')):
ptlist = []
for point in seg.findall(ns_tag('trkpt')):
lat, lon = float(point.get('lat')), float(point.get('lon'))
ptlist.append((lat, lon))
seglist.append(ptlist)
trklist.append(seglist)
if trklist == []:
error('no points found in gpx file')
GpxCache[gpx_filename] = trklist
return trklist
def find_file(filename, options):
"""Search the file either locally or in the directory of the project file."""
if os.path.isfile(filename):
return filename
elif os.path.isabs(filename):
return filename
elif options.project_filename is None:
return filename
else:
return os.path.join(os.path.dirname(options.project_filename), filename)
def track_segments(filename, zoom, options):
filename = find_file(filename, options)
if options.project:
return track_segments_project(filename, zoom, options)
else:
return track_segments_gpx(filename, zoom, options)
def track_segments_gpx(gpx_filename, zoom, options):
"""Return the list of all segments in gpx file in tile units."""
gpx = read_gpx(gpx_filename)
segments = []
for track in gpx:
for segment in track:
segments.append([deg2tilecoord(lat, lon, zoom) for lat,lon in segment])
return segments
def track_segments_project(project_filename, zoom, options):
"""Return the list of all segments in gpx files in project in tile units."""
segments = []
for options_ in project_options(options):
gpx_filename = (options_.track or options_.tracks or
options_.contour or options_.contours or None)
if gpx_filename:
segments.extend(track_segments(gpx_filename, zoom, options_))
elif options_.project:
segments.extend(track_segments_project(options_.project, zoom, options_))
else:
pass
return segments
def track_points(filename, zoom, options):
filename = find_file(filename, options)
if options.project:
return track_points_project(filename, zoom, options)
else:
return track_points_gpx(filename, zoom, options)
def track_points_gpx(gpx_filename, zoom, options):
"""Return the list of all points in gpx file in tile units."""
points_tu = []
for segment in track_segments_gpx(gpx_filename, zoom, options):
if options.view.interpolated_points:
points_tu.extend(interpolate_points(segment))
else:
points_tu.extend(segment)
return points_tu
def track_points_project(project_filename, zoom, options):
"""Return the list of all points in gpx files in project in tile units."""
points_tu = []
for options_ in project_options(options):
gpx_filename = (options_.track or options_.tracks or
options_.contour or options_.contours or None)
if gpx_filename:
points_tu.extend(track_points(gpx_filename, zoom, options_))
elif options_.project:
points_tu.extend(track_points_project(options_.project, zoom, options_))
else:
pass
return points_tu
# -- Generation of tile sets -------------------------------------------------
# tile set generator
# - iterator (possibly yield generator)
# - precalculated full size (taking into account tile subdivision)
class TileSet:
def __init__(self, gen=None, size=0):
if gen is None:
self.gen = itertools.chain()
self.size_ = 0
else:
self.gen = gen
self.size_ = size
def __iter__(self):
return self.gen
def size(self):
return self.size_
def extend(self, tileset):
self.gen = itertools.chain(self.gen, tileset.gen)
self.size_ += tileset.size_
def binding_box(self):
# has to copy the tile stream consumed by the call to binding_box
tiles = list(self.gen)
self.gen = iter(tiles)
return binding_box(tiles)
# subdivision generator
# created with a list of (x, y)
# when iterating, a tile at level zoom_current is subdivided to level
# zoom_target
def subdivise(tiles, zoom_current, zoom_target):
ratio = 2 ** (zoom_target - zoom_current)
for x, y in tiles:
for X in range(ratio):
for Y in range(ratio):
yield x * ratio + X, y * ratio + Y, zoom_target
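# Illustrative example: one tile at zoom 12 expands to the 2x2 block of tiles
# covering the same area at zoom 13 (ratio = 2 ** (13 - 12) = 2):
#   >>> list(subdivise([(5, 7)], 12, 13))
#   [(10, 14, 13), (10, 15, 13), (11, 14, 13), (11, 15, 13)]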
# filtering with database and zoom
def filter_tileset_with_db(tileset, db, zoom):
"""
Return the list of tiles from tileset less the tiles absent from db. This is
activated with the -inside parameter and useless with some commands (-insert
and -import).
Return a list because it needs to be scanned several times (starting with
length).
"""
db_tiles = db.list_tiles((zoom,))
tileset = list(set(db_tiles).intersection(tileset))
return tileset, len(tileset)
def filter_tileset_with_zoom(tileset, zoom):
# maybe useful
tileset = [tile for tile in tileset if tile[2] == zoom]
return tileset, len(tileset)
# track and contour tile generators
# the next four functions return a list of (x, y)
def tile_track_generator(options, gpx_filename, zoom, radius):
# returns list of tiles for track
# segments are considered connected
segments = track_segments(gpx_filename, zoom, options)
# be sure there will be no gap between segments
for index, segment in enumerate(segments[:-1]):
next = index + 1
segment.append(segments[next][0])
tiles = expand_tiles(segments, options, zoom, radius)
return tiles
def tile_tracks_generator(options, gpx_filename, zoom, radius):
# return list of tiles for tracks
# each segment is considered as a separate track
segments = track_segments(gpx_filename, zoom, options)
tiles = expand_tiles(segments, options, zoom, radius)
return tiles
def tile_contour_generator(options, gpx_filename, zoom, radius):
# return list of tiles for contour
# segments are considered connected
segments = track_segments(gpx_filename, zoom, options)
# be sure there will be no gap between segments
n = len(segments)
for index, segment in enumerate(segments):
next = (index + 1) % n
segment.append(segments[next][0])
tiles = expand_tiles(segments, options, zoom, radius)
return interior(tiles)
def tile_contours_generator(options, gpx_filename, zoom, radius):
# return list of tiles for contours
# each segment is considered as a separate contour
segments = track_segments(gpx_filename, zoom, options)
all_tiles = set()
for segment in segments:
segment.append(segment[0])
tiles = expand_tiles((segment,), options, zoom, radius)
for x in interior(tiles):
all_tiles.add(x)
return list(all_tiles)
# tile set generator for -track, -contour, -contours
def tile_list_generator(options, db_source, db_filter):
""" Generate tiles from track(s) or contour(s).
Handle list of zoom levels.
Handle zoom subdivision (18/12).
Handle intersection with database tiles if enabled.
Return dedicated iterator with total and reduced lengths.
"""
generator, source, zooms, radius = options_generate(options)
tile_set = TileSet()
for zoom in zooms:
tile_set.extend(tile_list_generate_level(options, generator, source, zoom, radius, db_source, db_filter))
return tile_set
def tile_list_generate_level(options, generator, source, zoom, radius, db_source, db_filter):
print(source, zoom)
source = find_file(source, options)
if zoom <= options.zoom_limit:
# no subdivision required
gen0 = generator(options, source, zoom, radius)
gen = ((x, y, zoom) for x, y in gen0)
size = len(gen0)
else:
# prepare tile coordinates for subdivision
gen0 = generator(options, source, options.zoom_limit, radius)
gen = subdivise(gen0, options.zoom_limit, zoom)
size = len(gen0) * sqr(2 ** (zoom - options.zoom_limit))
if db_filter:
tileset, size = filter_tileset_with_db(gen, db_source, zoom)
else:
tileset, size = gen, size
return TileSet(tileset, size)
# tile set generator for -project
def tile_project_generator(options, project, zoom, radius, db_source, db_filter):
if zoom is None:
all_zooms = list(range(MAXZOOM + 1))
else:
all_zooms = zoom
tile_set = TileSet()
for z in all_zooms:
ts = []
for options_ in project_options(options):
options_.inside = options.inside or options_.inside
options_.zoom = [z] if z in options_.zoom else []
if radius is not None:
if options_.radius is None:
options_.radius = radius
else:
options_.radius = min(options_.radius, radius)
ts2 = tileset(options_, db_source, db_filter)
ts = set(ts).union(ts2)
tile_set.extend(TileSet(ts, len(ts)))
return tile_set
def project_options(options):
result = []
for line in read_project(options.project, options):
options_ = ProjectParser().parse_args(line.split())
options_.project_filename = find_file(options.project, options)
options_.db_name = options.db_name
read_config(options_)
result.append(options_)
return result
def read_project(project_filename, options):
try:
result = []
with open(find_file(project_filename, options)) as f:
for line in f:
# remove comments (from semicolon to end of line)
line = re.sub(' *;.*', '', line).strip()
if line == '':
continue
result.append(line)
return result
except:
error('error reading project ' + project_filename)
# tile set generator for -records
def db_tiles_generator(options, source, zooms, radius, db_source):
if radius:
error('radius is not used for -records tile set')
tiles = db_source.list_tiles(zooms)
size = len(tiles)
return TileSet(iter(tiles), size)
# tile set generator for -tiles
def coord_tiles_generator(options, source, zooms, radius, db_source, db_filter):
if radius:
error('radius is not used for -tiles tile set')
if len(zooms) == 0:
return TileSet()
elif len(zooms) != 1:
error('only one zoom level required')
else:
zoom = zooms[0]
xmin, ymin, xmax, ymax = options.coord_tiles
# gen is a generator not a list, because we do not want to store a very
# large set before filtering against db
gen = ((x, y, zoom) for x in range(xmin, xmax + 1) for y in range(ymin, ymax + 1))
size = (xmax - xmin + 1) * (ymax - ymin + 1)
if options.inside:
tileset, size = filter_tileset_with_db(gen, db_source, zoom)
else:
tileset, size = gen, size
return TileSet(iter(tileset), size)
# tile set factory
def tileset(options, db, db_filter=False):
"""Return a TileSet object"""
try:
if options.db_tiles:
generator, source, zoom, radius = options_generate(options)
return db_tiles_generator(options, source, zoom, radius, db)
elif options.coord_tiles:
generator, source, zoom, radius = options_generate(options)
return coord_tiles_generator(options, source, zoom, radius, db, db_filter)
elif options.project:
generator, source, zoom, radius = options_generate(options)
return tile_project_generator(options, source, zoom, radius, db, db_filter)
else:
return tile_list_generator(options, db, db_filter)
except MemoryError:
error('not enough memory, decrease zoom or contour area')
except:
raise
# -- Database classes --------------------------------------------------------
class TileDatabase:
def __init__(self, fullname, tile_format, url_template):
self.fullname = fullname
self.__tile_format = tile_format
self.__url_template = url_template
def tile_format(self):
# provide the format of tiles stored in database
if self.__tile_format == 'SERVER':
conv = {'.jpg':'JPG', '.png':'PNG'}
try:
ext = os.path.splitext(self.url_template())[1]
return conv[ext.lower()]
except:
error('unable to determine tile format from url template')
else:
return self.__tile_format
def url_template(self):
# provide url template to access to tile server
return self.__url_template
def tile_ext(self):
if self.tile_format().startswith('JPG'):
return 'jpg'
elif self.tile_format().startswith('PNG'):
return 'png'
elif self.tile_format() == 'SERVER':
return os.path.splitext(self.url_template())[1]
elif self.tile_format() == '':
error('tile format missing, use -describe with -tile_format')
else:
error('tile format is not handled')
def exists(self, x, y, zoom):
# return (True, date) if exists else (False, None)
pass
def upper_tile(self, x, y, zoom):
for z in range(zoom - 1, 0, -1):
scale = 2 ** (zoom - z)
x1 = x // scale  # integer division so the lookup keys stay integers under Python 3
y1 = y // scale
if self.exists(x1, y1, z)[0]:
return x1, y1, z
else:
return None
def retrieve(self, x, y, zoom):
# return (True, date, pil_image) if exists else (False, None, None)
pass
def retrieve_buffer(self, x, y, zoom):
# return (True, date, image_buffer) if exists else (False, None, None)
pass
def update(self, date, x, y, zoom, tile):
pass
def count_tiles(self, zoom):
pass
def list_tiles(self, zoom):
pass
def commit(self):
pass
def pack(self):
pass
def close(self):
pass
class SqliteDatabase(TileDatabase):
def __init__(self, db_name, tile_format, url_template):
TileDatabase.__init__(self, db_name, tile_format, url_template)
self.conn = sqlite3.connect(db_name, check_same_thread=False)
if sys.version_info < (3,):
self.conn.text_factory = str
else:
self.conn.text_factory = bytes
self.cursor = self.conn.cursor()
def execute(self, request, *args):
self.cursor.execute(request, args)
def commit(self):
self.conn.commit()
def pack(self):
self.execute('vacuum')
def close(self):
self.conn.close()
class KaheloDatabase(SqliteDatabase):
def __init__(self, db_name, tile_format, url_template):
SqliteDatabase.__init__(self, db_name, tile_format, url_template)
self.execute('CREATE TABLE IF NOT EXISTS server (template text, format text)')
self.execute('CREATE TABLE IF NOT EXISTS tiles (date timestamp, x integer, y integer, zoom integer, tile blob)')
self.execute('CREATE INDEX IF NOT EXISTS tile_index ON tiles (x, y, zoom)')
self.commit()
def __retrieve(self, x, y, zoom):
# private, return the row including rowid,date
self.execute("SELECT rowid,date FROM tiles WHERE x = ? AND y = ? AND zoom = ?", x, y, zoom)
return self.cursor.fetchone()
def __retrieve_full(self, x, y, zoom):
# private, return the row including rowid,date,tile_blob
self.execute("SELECT rowid,date,tile FROM tiles WHERE x = ? AND y = ? AND zoom = ?", x, y, zoom)
return self.cursor.fetchone()
def exists(self, x, y, zoom):
row = self.__retrieve(x, y, zoom)
return (False, None) if row is None else (True, row[1])
def retrieve(self, x, y, zoom):
row = self.__retrieve_full(x, y, zoom)
if row is None:
return (False, None, None)
else:
img = create_image_from_blob(row[2])
return (True, row[1], img)
def retrieve_buffer(self, x, y, zoom):
row = self.__retrieve_full(x, y, zoom)
if row is None:
return (False, None, None)
else:
return (True, row[1], row[2])
def update(self, date, x, y, zoom, tile_buffer):
row = self.__retrieve(x, y, zoom)
if row is not None:
self.execute("DELETE FROM tiles WHERE rowid = ?", row[0])
if date is None:
date = int(math.trunc(time()))
self.execute("INSERT INTO tiles VALUES (?,?,?,?,?)", date, x, y, zoom, tile_buffer)
def delete(self, x, y, zoom):
row = self.__retrieve(x, y, zoom)
if row is not None:
self.execute("DELETE FROM tiles WHERE rowid = ?", row[0])
return True
def count_tiles(self, zooms):
R = 0
for zoom in zooms:
self.execute('SELECT COUNT(*) FROM tiles WHERE zoom = ?', zoom)
r = self.cursor.fetchall()
R += r[0][0]
return R
def list_tiles(self, zooms):
R = []
for zoom in zooms:
self.execute('SELECT x,y,zoom FROM tiles WHERE zoom = ?', zoom)
R.extend(self.cursor.fetchall())
return R
class RmapsDatabase(SqliteDatabase):
def __init__(self, db_name, tile_format, url_template):
SqliteDatabase.__init__(self, db_name, tile_format, url_template)
self.execute('CREATE TABLE IF NOT EXISTS android_metadata (locale text)')
self.execute('CREATE TABLE IF NOT EXISTS tiles (x integer, y integer, z integer, s integer, image blob)')
self.execute('CREATE INDEX IF NOT EXISTS IND ON tiles (x, y, z, s)')
self.execute('CREATE TABLE IF NOT EXISTS info (minzoom integer, maxzoom integer)')
self.execute("SELECT locale FROM android_metadata")
row = self.cursor.fetchone()
if row is None:
self.execute("INSERT INTO android_metadata VALUES (?)", '',)
self.execute("INSERT INTO info VALUES (?,?)", 1, 17)
self.commit()
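    # Note: the RMaps schema stores the zoom level inverted, z = 17 - zoom,
    # so zoom 12 is stored as z = 5; the queries below apply the same
    # conversion in both directions.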
def __retrieve(self, x, y, zoom):
# private, return the row including rowid
self.execute("SELECT rowid FROM tiles WHERE x = ? AND y = ? AND z = ?", x, y, 17 - zoom)
return self.cursor.fetchone()
def __retrieve_full(self, x, y, zoom):
# private, return the row including rowid,tile_blob
self.execute("SELECT rowid,image FROM tiles WHERE x = ? AND y = ? AND z = ?", x, y, 17 - zoom)
return self.cursor.fetchone()
def exists(self, x, y, zoom):
row = self.__retrieve(x, y, zoom)
return (row is not None), None
def retrieve(self, x, y, zoom):
row = self.__retrieve_full(x, y, zoom)
if row is None:
return False, None, None
else:
img = create_image_from_blob(row[1])
return True, None, img
def retrieve_buffer(self, x, y, zoom):
row = self.__retrieve_full(x, y, zoom)
if row is None:
return False, None, None
else:
return True, None, row[1]
def update(self, date, x, y, zoom, tile):
row = self.__retrieve(x, y, zoom)
if row is not None:
self.execute("DELETE FROM tiles WHERE rowid = ?", row[0])
self.execute("INSERT INTO tiles VALUES (?,?,?,?,?)", x, y, 17 - zoom, 0, tile)
def delete(self, x, y, zoom):
row = self.__retrieve(x, y, zoom)
if row is not None:
self.execute("DELETE FROM tiles WHERE rowid = ?", row[0])
return True
def count_tiles(self, zooms):
R = 0
for zoom in zooms:
self.execute('SELECT COUNT(*) FROM tiles WHERE z = ?', 17 - zoom)
r = self.cursor.fetchall()
R += r[0][0]
return R
def list_tiles(self, zooms):
R = []
for zoom in zooms:
self.execute('SELECT x,y,z FROM tiles WHERE z = ?', 17 - zoom)
rows = self.cursor.fetchall()
R.extend([(x, y, zoom) for (x, y, z) in rows])
return R
class FolderDatabase(TileDatabase):
def __init__(self, db_name, tile_format, url_template):
TileDatabase.__init__(self, db_name, tile_format, url_template)
def filename(self, x, y, zoom):
return os.path.join(self.fullname,
str(zoom), str(x), str(y) + '.' + self.tile_ext())
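    # Layout example (database name is illustrative): with a folder database
    # named 'tiles' holding PNG tiles, tile (x=2076, y=1410, zoom=12) is
    # stored as tiles/12/2076/1410.png; MaverickDatabase below appends
    # '.tile' to the same path.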
def exists(self, x, y, zoom):
filename = self.filename(x, y, zoom)
if os.path.exists(filename):
return True, int(math.trunc(os.path.getmtime(filename)))
else:
return False, None
def retrieve(self, x, y, zoom):
filename = self.filename(x, y, zoom)
if os.path.exists(filename):
try:
img = Image.open(filename)
return True, int(math.trunc(os.path.getmtime(filename))), img
except:
return None, None, None
else:
return False, None, None
def retrieve_buffer(self, x, y, zoom):
filename = self.filename(x, y, zoom)
if os.path.exists(filename):
try:
with open(filename, 'rb') as f:
buffer = f.read()
return True, int(math.trunc(os.path.getmtime(filename))), buffer
except:
return None, None, None
else:
return False, None, None
def update(self, date, x, y, zoom, tile):
filename = self.filename(x, y, zoom)
path = os.path.dirname(filename)
try:
if not os.path.exists(path):
os.makedirs(path)
with open(filename, 'wb') as f:
f.write(tile)
if date is not None:
try:
os.utime(filename, (date, date))
except:
# utime does not work under android, avoid the error message
pass
except:
error('unable to save ' + filename)
def delete(self, x, y, zoom):
filename = self.filename(x, y, zoom)
if os.path.exists(filename):
try:
os.remove(filename)
return True
            except OSError:
return False
else:
return True
def regexp_filename(self):
re_path = r'[^\d](\d+)[^\d](\d+)[^\d]'
re_name = r'(\d+)\.%s$' % self.tile_ext()
return re_path + re_name
def list_tiles(self, zooms):
regexp = re.compile(self.regexp_filename())
R = []
for zoom in zooms:
path = os.path.join(self.fullname, str(zoom))
for root, dirs, files in os.walk(path):
if files:
for filename in files:
fullname = os.path.join(root, filename)
m = regexp.search(fullname)
if m:
zoom, x, y = m.group(1,2,3)
zoom, x, y = int(zoom), int(x), int(y)
R.append((x, y, zoom))
return R
def count_tiles(self, zooms):
regexp = re.compile(self.regexp_filename())
R = 0
for zoom in zooms:
path = os.path.join(self.fullname, str(zoom))
for root, dirs, files in os.walk(path):
if files:
for file in files:
fullname = os.path.join(root, file)
m = regexp.search(fullname)
if m:
R += 1
return R
def pack(self):
for _ in (1, 2):
for root, dirs, files in os.walk(self.fullname):
if not dirs and not files:
os.rmdir(root)
class MaverickDatabase(FolderDatabase):
def __init__(self, db_name, tile_format, url_template):
FolderDatabase.__init__(self, db_name, tile_format, url_template)
def filename(self, x, y, zoom):
return FolderDatabase.filename(self, x, y, zoom) + '.tile'
def regexp_filename(self):
re_path = r'[^\d](\d+)[^\d](\d+)[^\d]'
re_name = r'(\d+)\.%s\.tile$' % self.tile_ext()
return re_path + re_name
# persistence of database properties
class DatabaseProperties:
def __init__(self, db_name):
norm_name = os.path.normpath(db_name)
self.db_name = os.path.split(norm_name)[-1]
self.filename = norm_name + '.properties'
self.dirname = os.path.dirname(self.filename)
self.section = 'tile_database_properties'
self.warning = '; This file has been created by %s.\n' % APPNAME
self.parser = configparser.ConfigParser(allow_no_value=True)
self.parser.add_section(self.section)
def get(self):
if not os.path.isfile(self.filename):
return None, None, None
else:
self.parser.read(self.filename)
return (self.parser.get(self.section, 'db_format'),
self.parser.get(self.section, 'tile_format'),
self.parser.get(self.section, 'url_template'))
def set(self, db_format, tile_format, url_template):
self.parser.set(self.section, 'db_name', self.db_name)
self.parser.set(self.section, 'db_format', db_format)
self.parser.set(self.section, 'url_template', url_template)
self.parser.set(self.section, 'tile_format', tile_format)
if self.dirname and not os.path.exists(self.dirname):
os.makedirs(self.dirname)
try:
with open(self.filename, 'w') as f:
f.write(self.warning)
self.parser.write(f)
except Exception as e:
            error('unable to write ' + self.filename + ' : ' + str(e))
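# The '<db_name>.properties' file written above is a plain INI file; its
# typical content (values are illustrative only) looks like:
#
#   ; This file has been created by <APPNAME>.
#   [tile_database_properties]
#   db_name = local.db
#   db_format = KAHELO
#   url_template = http://tile.example.org/{zoom}/{x}/{y}.png
#   tile_format = PNG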
# database factory
def db_factory(db_name):
db_format, tile_format, url_template = DatabaseProperties(db_name).get()
if db_format is None:
error('tile database format is not declared. Use -describe to describe database.')
elif db_format == 'KAHELO':
return KaheloDatabase(db_name, tile_format, url_template)
elif db_format == 'RMAPS':
return RmapsDatabase(db_name, tile_format, url_template)
elif db_format == 'FOLDER':
return FolderDatabase(db_name, tile_format, url_template)
elif db_format == 'MAVERICK':
return MaverickDatabase(db_name, tile_format, url_template)
else:
error('unknown tile database format')
# -- Traces ------------------------------------------------------------------
class TileCounters:
# helper class
def __init__(self):
self.ignored = 0
self.inserted = 0
self.available = 0
self.expired = 0
self.deleted = 0
self.missing = 0
self.failure = 0
def tile_trace(options, x, y, zoom, index, size, msg):
if options.verbose:
tile_message(x, y, zoom, index, size, msg)
elif options.quiet:
pass
else:
num = index + 1
pc1 = 100.0 * (num - 1) / size
pc2 = 100.0 * num / size
pc3 = math.floor(pc2)
if pc1 < pc3:
print('Tiles %.0f%% (%d/%d)' % (pc3, num, size))
def tile_message(x, y, zoom, index, size, msg):
print('Tile (%d,%d,%d) %d/%d: %s' % (x, y, zoom, index+1, size, msg))
def display_report(options, *entries):
print('-' * 29)
entries = list(entries)
entries.append(('Elapsed time', strftime("%H:%M:%S", gmtime(time() - options.start_time))))
for caption, value in entries:
try:
v = '{:,}'.format(value)
except:
v = value
print('%-16s %12s' % (caption, v))
def decsep(n):
return '{:,}'.format(n)
# -- Insertion strategies ----------------------------------------------------
#
# used by -insert and -import/-export
# import actions
NOP, INSERT, LATEST = range(3)
# force mode: insert whenever a source tile is available
FORCE_MODE = (
    #  dst missing   no date in dst   date in dst    date in dst
    #                                 expired        valid
    ( NOP,    NOP,    NOP,    NOP),        # src missing
    ( INSERT, INSERT, INSERT, INSERT),     # no date in src
    ( INSERT, INSERT, INSERT, INSERT))     # date available
# update mode: insert if missing or expired in destination, keep the most
# recent when both dates are known, ignore when dates cannot be compared
UPDATE_MODE = (
    #  dst missing   no date in dst   date in dst    date in dst
    #                                 expired        valid
    ( NOP,    NOP,    NOP,    NOP),        # src missing
    ( INSERT, NOP,    INSERT, NOP),        # no date in src
    ( INSERT, NOP,    LATEST, LATEST))     # date available
def insert_strategy(options, strategy, exists_src, date_src, exists_dst, date_dst):
case_src = 0 if not exists_src else (1 if date_src is None else 2)
case_dst = 0 if not exists_dst else (1 if date_dst is None else 2)
if case_dst == 2:
case_dst = 2 if date_dst <= options.database.expiry_date else 3
action = strategy[case_src][case_dst]
if action == LATEST:
return date_src > date_dst
else:
return action == INSERT
def should_insert(options, exists_src, date_src, exists_dst, date_dst):
if options.force_insert:
return insert_strategy(options, FORCE_MODE, exists_src, date_src, exists_dst, date_dst)
else:
return insert_strategy(options, UPDATE_MODE, exists_src, date_src, exists_dst, date_dst)
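# Worked example of the strategy tables (dates are illustrative timestamps):
# in the default update mode, should_insert(options, True, 1000, False, None)
# is True (source available, destination missing -> INSERT), whereas
# should_insert(options, True, 1000, True, 2000) with a non-expired
# destination date falls on LATEST and returns 1000 > 2000, i.e. False:
# the older source tile is not copied over the newer destination tile.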
# -- Commands ----------------------------------------------------------------
# -version : version number --------------------------------------------------
def print_version():
print(IDENTITY)
print(APPNAME, VERSION)
# -license : display text of license -----------------------------------------
def print_license():
print(IDENTITY)
print(APPNAME, VERSION)
print()
print(LICENSE)
# -help : print help ---------------------------------------------------------
def print_help():
ArgumentParser().print_help()
def do_helphtml():
if os.path.isfile(APPNAME + '.html'):
helpfile = APPNAME + '.html'
else:
helpfile = r'http://kahelo.godrago.net/kahelo.html'
webbrowser.open(helpfile, new=2)
# -describe: set and display database properties -----------------------------
def do_describe(db_name, options):
db_format, tile_format, url_template = DatabaseProperties(db_name).get()
if options.db_format is not None:
db_format = options.db_format
if options.tile_format is not None:
tile_format = options.tile_format
if options.url_template is not None:
url_template = options.url_template
DatabaseProperties(db_name).set(db_format, tile_format, url_template)
print('db_name ', db_name)
print('db_format ', db_format)
print('tile_format ', tile_format)
print('url_template', url_template)
# -count : number of tiles for source and zoom -------------------------------
def do_count(db_name, options):
size, inserted, expired, missing = count(db_name, options)
display_report(options, ('Tiles in set', size),
('Up to date', inserted),
('Expired', expired),
('Missing', missing))
return size, inserted, expired, missing
def count(db_name, options):
db = db_factory(db_name)
tiles = tileset(options, db, db_filter=options.inside)
n = tiles.size()
inserted = 0
expired = 0
for index, (x, y, zoom) in enumerate(tiles):
exists, date = db.exists(x, y, zoom)
if exists:
if date is None or date > options.database.expiry_date:
inserted += 1
msg = 'available'
else:
expired += 1
msg = 'expired'
else:
msg = 'missing'
tile_trace(options, x, y, zoom, index, n, msg)
return tiles.size(), inserted, expired, tiles.size() - inserted - expired
# -insert : download of tiles and insertion in database ----------------------
def do_insert(db_name, options):
db = db_factory(db_name)
tiles = tileset(options, db, db_filter=options.inside)
n = tiles.size()
counters = TileCounters()
for index, (x, y, zoom) in enumerate(tiles):
insert_tile(tiles, db, options, x, y, zoom, index, n, counters)
db.commit()
if options.verbose:
print('Commit.')
display_report(options, ('Tiles in set', n),
('Already present', counters.ignored),
('Inserted', counters.inserted),
('Missing', counters.missing))
def insert_tile(tiles, db, options, x, y, zoom, index, n, counters):
exists_dst, date_dst = db.exists(x, y, zoom)
exists_src, date_src = True, None
if not should_insert(options, exists_src, date_src, exists_dst, date_dst):
counters.ignored += 1
tile_trace(options, x, y, zoom, index, n, 'already in database')
elif counters.inserted >= options.insert.session_max:
counters.missing += 1
else:
sleep(options.insert.request_delay)
for i in range(options.insert.number_of_attempts):
url = tile_url(options, db, x, y, zoom)
try:
# no proxy handling...
                u = urllib_request.urlopen(url, timeout=options.insert.timeout)
tile_buffer = u.read()
u.close()
break
except urllib_error.HTTPError as e:
if e.code == 404:
counters.missing += 1
tile_trace(options, x, y, zoom, index, n, '%s : not found' % url)
return
else:
tile_trace(options, x, y, zoom, index, n, '%s : connection error %d - %d' % (url, i+1, e.code))
except Exception as e:
tile_trace(options, x, y, zoom, index, n, '%s : Exception connection error %d - %s' % (url, i+1, e))
else:
counters.missing += 1
return
if db.tile_format() == 'SERVER':
pass
else:
try:
tile_image = create_image_from_blob(tile_buffer)
tile_buffer = create_blob_from_image(tile_image,
db.tile_format(),
options.tiles.jpeg_quality)
except Exception as e:
tile_trace(options, x, y, zoom, index, n, 'image conversion error open ' + str(e))
counters.missing += 1
return
db.update(int(math.floor(time())), x, y, zoom, tile_buffer)
counters.inserted += 1
msg = 'updated' if exists_dst else 'inserted'
tile_trace(options, x, y, zoom, index, n, '%s : %s' % (url, msg))
if counters.inserted % options.database.commit_period == 0:
db.commit()
if options.verbose:
print('Commit.')
def tile_url(options, db, x, y, zoom):
template = db.url_template()
if template is None or template == '':
error('unknown server url template, use -describe to supply.')
url = template
url = url.replace('{x}', str(x))
url = url.replace('{y}', str(y))
url = url.replace('{z}', str(zoom))
url = url.replace('{zoom}', str(zoom))
m = re.search(r'\[(.*)\]', template)
if m:
stripes = m.group(1)
url = url.replace('[' + stripes + ']', stripes[random.randint(0, len(stripes) - 1)])
return url
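# Expansion example (template is hypothetical): with the template
# 'http://[abc].tile.example.org/{zoom}/{x}/{y}.png' and tile x=17, y=11,
# zoom=5, tile_url() returns 'http://a.tile.example.org/5/17/11.png', the
# stripe letter between the brackets being picked at random among a, b, c.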
# -import : import tiles from tile database ----------------------------------
def do_import(db_name, options):
if options.db_source is None:
error('source database must be given')
db_arg = db_factory(db_name)
db_src = db_factory(options.db_source)
tiles = tileset(options, db_arg, db_filter=options.inside)
import_tiles(options, db_src, db_arg, tiles)
def import_tiles(options, db_src, db_dst, tiles):
n = tiles.size()
counters = TileCounters()
for index, (x, y, zoom) in enumerate(tiles):
import_tile(tiles, db_dst, x, y, zoom, options, index, n, counters, db_src)
db_dst.commit()
display_report(options, ('Tiles in set', n),
('Already present', counters.ignored),
('Inserted', counters.inserted),
('Missing', counters.missing))
def import_tile(tiles, db_dst, x, y, zoom, options, index, n, counters, db_src):
exists_dst, date_dst = db_dst.exists(x, y, zoom)
exists_src, date_src = db_src.exists(x, y, zoom)
if not exists_src:
counters.missing += 1
        tile_trace(options, x, y, zoom, index, n, 'missing in source')
return
if not should_insert(options, exists_src, date_src, exists_dst, date_dst):
counters.ignored += 1
        tile_trace(options, x, y, zoom, index, n, 'source ignored')
return
# retrieve from source, tile is a PIL image
exists_src, date_src, tile = db_src.retrieve(x, y, zoom)
if exists_src is None:
counters.missing += 1
tile_trace(options, x, y, zoom, index, n, 'source unreadable')
return
# prepare drawing
if date_src is not None and date_src > options.database.expiry_date:
color = options.tiles.border_valid_color
else:
color = options.tiles.border_expired_color
tile = tile.convert('RGBA')
# draw tile width if requested
if options.Import.draw_tile_width:
tile = draw_tile_width(x, y, zoom, tile, color)
# draw tile border if requested
if options.Import.draw_tile_limits:
tile = draw_alpha_border(tile, color)
# convert to destination tile format
tile = create_blob_from_image(tile, db_dst.tile_format(), options.tiles.jpeg_quality)
db_dst.update(date_src, x, y, zoom, tile)
if index % options.database.commit_period == 0:
db_dst.commit()
counters.inserted += 1
if exists_dst:
tile_trace(options, x, y, zoom, index, n, 'updated')
else:
tile_trace(options, x, y, zoom, index, n, 'inserted')
# -export : export tiles to tile database ------------------------------------
def do_export(db_name, options):
if options.db_dest is None:
error('destination database must be given')
db_arg = db_factory(db_name)
db_dst = db_factory(options.db_dest)
tiles = tileset(options, db_arg, db_filter=options.inside)
import_tiles(options, db_arg, db_dst, tiles)
# -delete: delete tiles from database ----------------------------------------
def do_delete(db_name, options):
db = db_factory(db_name)
tiles = tileset(options, db, db_filter=options.inside)
size = tiles.size()
counters = TileCounters()
for index, (x, y, zoom) in enumerate(tiles):
delete_tile(tiles, db, x, y, zoom, options, index, size, counters)
db.commit()
db.pack()
display_report(options, ('Tiles in set', size),
('Deleted', counters.deleted),
('Failure', counters.failure),
('Missing', counters.missing))
def delete_tile(tiles, db, x, y, zoom, options, index, size, counters):
exists, date = db.exists(x, y, zoom)
if not exists:
counters.missing += 1
        tile_trace(options, x, y, zoom, index, size, 'missing')
else:
if db.delete(x, y, zoom):
counters.deleted += 1
tile_trace(options, x, y, zoom, index, size, 'deleted')
else:
counters.failure += 1
tile_trace(options, x, y, zoom, index, size, 'failed to remove')
if index % options.database.commit_period == 0:
db.commit()
# -view : make image from gpx ------------------------------------------------
def do_makeview(db_name, options):
db = db_factory(db_name)
generator, source, zoom, radius = options_generate(options)
if len(zoom) > 1:
error('view does not apply to multiple zoom levels')
else:
zoom = zoom[0]
tiles = tileset(options, db, db_filter=options.inside)
n = tiles.size()
counters = TileCounters()
if n == 0:
error('no tiles to display')
x0, y0, x1, y1 = tiles.binding_box()
nx = x1 - x0 + 1
ny = y1 - y0 + 1
max_dim = max(nx, ny) * 256
if max_dim <= options.view.max_dim:
tile_width = 256
else:
tile_width = int(256.0 * options.view.max_dim / max_dim)
if tile_width == 0:
error('too many tiles for image size')
# create image
mosaic = Image.new('RGB', (nx * tile_width, ny * tile_width), options.tiles.background_color)
draw = ImageDraw.Draw(mosaic)
# draw tiles
for index, (x, y, z) in enumerate(tiles):
makeview_tile(tiles, db, mosaic, draw, tile_width, x0, y0, x, y, zoom, options, index, n, counters)
# draw points at track coordinates
if options.view.draw_points and not options.db_tiles:
points_tu = track_points(source, zoom, options)
for x, y in points_tu:
X, Y = int((x - x0) * tile_width), int((y - y0) * tile_width)
draw.rectangle((X-2, Y-2, X + 2, Y + 2), fill=(255,0,0))
# draw track
if options.view.draw_tracks and not options.db_tiles:
draw_tracks(options, draw, source, x0, y0, zoom, tile_width)
# draw circles
if options.view.draw_circles and not options.db_tiles:
points_tu = track_points(source, zoom, options)
radius_km = options.radius
if radius_km is None:
x, y = points_tu[0]
radius_km = default_radius(x, y, zoom)
if radius_km > 0:
radius_tu = tile_hdistance_tu(x, y, zoom, radius_km)
for x, y in points_tu:
X, Y = int((x - x0) * tile_width), int((y - y0) * tile_width)
d = radius_tu * tile_width
draw.ellipse((X-d, Y-d, X + d, Y + d))
# save image and display if required
try:
if options.image is None:
imagename = APPNAME + '-view-image.jpg'
mosaic.save(imagename)
webbrowser.open(imagename, new=2)
else:
imagename = options.image
mosaic.save(imagename)
except Exception as e:
print(e)
error('error saving image ' + imagename)
display_report(options, ('Tiles in set', n),
('Displayed', counters.available),
('Missing', counters.missing))
def makeview_tile(tiles, db, mosaic, draw, tile_width, x0, y0, x, y, zoom, options, index, n, counters):
exists, date, tile = db.retrieve(x, y, zoom)
if not exists:
msg = 'missing'
counters.missing += 1
color = options.tiles.border_valid_color
elif date is None:
msg = 'pasted'
counters.available += 1
color = options.tiles.border_valid_color
elif date <= options.database.expiry_date:
msg = 'pasted, expired'
counters.expired += 1
color = options.tiles.border_expired_color
else:
msg = 'pasted'
counters.available += 1
color = options.tiles.border_valid_color
X, Y = (x - x0) * tile_width, (y - y0) * tile_width
if exists:
if options.view.true_tiles:
img = resize_image(options, tile, tile_width)
else:
img = Image.new('RGBA', (tile_width, tile_width), options.tiles.ghost_tile_color)
else:
if options.view.draw_upper_tiles:
img = upper_tile_image(db, x, y, zoom)
if img:
img = resize_image(options, img, tile_width)
else:
img = None
else:
img = None
if img is None:
draw.rectangle((X, Y, X + tile_width, Y + tile_width),
fill=options.tiles.missing_tile_color, outline=color)
else:
# draw tile width if requested
if options.view.draw_tile_width:
img = draw_tile_width(x, y, zoom, img, color)
# draw tile border if requested
if options.view.draw_tile_limits:
img = draw_alpha_border(img, color)
# paste on full image
mosaic.paste(img, (X, Y))
tile_trace(options, x, y, zoom, index, n, msg)
# -server: http tile server --------------------------------------------------
class HTTPServerBest(HTTPServer):
_continue = True
def serve_until_shutdown(self):
while self._continue:
self.handle_request()
def shutdown(self):
self._continue = False
# We fire a last request at the server in order to take it out of the
# while loop in `self.serve_until_shutdown`.
try:
            urllib_request.urlopen(
                'http://%s:%s/' % (self.server_name, self.server_port))
except urllib_error.URLError as e:
# If the server is already shut down, we receive a socket error,
# which we ignore.
pass
self.server_close()
class HTTPServerLayer(object):
host = '127.0.0.1'
port = 80
def start_server(self, db_name):
global db
db = db_factory(db_name)
self.server = HTTPServerBest((self.host, self.port), TileServerHTTPRequestHandler)
self.server_thread = threading.Thread(
target=self.server.serve_until_shutdown)
self.server_thread.daemon = True
self.server_thread.start()
# Wait a little as it sometimes takes a while to get the server
# started.
sleep(0.25)
def stop_server(self):
if self.server is None:
return
self.server.shutdown()
self.server_thread.join(30) # 30 -> timeout
def do_server(db_name, port, options):
    server = HTTPServerLayer()
    if port:
        server.port = int(port)  # honour the requested port instead of the class default
    server.start_server(db_name)
    return server
def stop_server(server):
server.stop_server()
class TileServerHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
global db
try:
m = re.search(r'/(\d+)/(\d+)/(\d+)\.jpg', self.path)
if not m:
raise IOError
zoom, x, y = m.group(1,2,3)
zoom, x, y = int(zoom), int(x), int(y)
exists, date, img = db.retrieve(x, y, zoom)
#print(x, y, zoom, exists)
if not exists:
raise IOError
self.send_response(200)
self.send_header('Content-type','image/jpeg')
self.end_headers()
self.wfile.write(create_blob_from_image(img, 'JPG'))
return
except IOError:
self.send_error(404, 'file not found')
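# The embedded server answers slippy-map style requests of the form
# http://127.0.0.1:<port>/<zoom>/<x>/<y>.jpg (the regular expression above
# maps the three path components to zoom, x and y). Tiles are always
# re-encoded to JPEG, and a missing tile results in a 404.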
# -stat : database statistics ------------------------------------------------
def do_statistics(db_name, options):
db = db_factory(db_name)
tiles = tileset(options, db, db_filter=options.inside)
n = tiles.size()
maxzoomp1 = MAXZOOM + 1
sizes = []
size = [[] for _ in range(maxzoomp1)]
xmin = [2 ** maxzoomp1] * maxzoomp1
ymin = [2 ** maxzoomp1] * maxzoomp1
xmax = [0] * maxzoomp1
ymax = [0] * maxzoomp1
for index, (x, y, zoom) in enumerate(tiles):
exists, date, buffer = db.retrieve_buffer(x, y, zoom)
if exists:
sizes.append(len(buffer))
size[zoom].append(len(buffer))
if x < xmin[zoom]: xmin[zoom] = x
if y < ymin[zoom]: ymin[zoom] = y
if x > xmax[zoom]: xmax[zoom] = x
if y > ymax[zoom]: ymax[zoom] = y
tile_trace(options, x, y, zoom, index, n, 'counted')
else:
pass
display_report(options)
print('-' * 29)
    print('%4s %6s %6s %6s %8s %12s (sizes in bytes)' % ('zoom', 'count', 'min', 'max', 'average', 'total'))
for zoom in [z for z,v in enumerate(size) if len(v) > 0]:
slen = decsep(len(size[zoom]))
smin = decsep(min(size[zoom]))
smax = decsep(max(size[zoom]))
smean = decsep(sum(size[zoom]) // len(size[zoom]))
stot = decsep(sum(size[zoom]))
print('%4d %6s %6s %6s %8s %12s' % (zoom, slen, smin, smax, smean, stot))
if len(sizes) == 0:
slen, smin, smax, smean, stot = [0] * 5
else:
slen = decsep(len(sizes))
smin = decsep(min(sizes))
smax = decsep(max(sizes))
smean = decsep(sum(sizes) // len(sizes))
stot = decsep(sum(sizes))
print('%4s %6s %6s %6s %8s %12s' % ('all', slen, smin, smax, smean, stot))
print('-' * 29)
    print('%4s %6s %6s %6s %6s (bounding box in tile units)' % ('zoom', 'x min', 'y min', 'x max', 'y max'))
for zoom in [z for z,v in enumerate(xmin) if v < 2 ** maxzoomp1]:
print('%4d %6d %6d %6d %6d' % (zoom, xmin[zoom], ymin[zoom], xmax[zoom], ymax[zoom]))
print('-' * 29)
    print('%4s %11s %11s %11s %11s (bounding box in degrees)' % ('zoom', 'lat min', 'long min', 'lat max', 'long max'))
for zoom in [z for z,v in enumerate(xmin) if v < 2 ** maxzoomp1]:
lat_min, lon_min = tile2deg(xmin[zoom], ymin[zoom], zoom)
lat_max, lon_max = tile2deg(xmax[zoom], ymax[zoom], zoom)
print('%4d %11.6f %11.6f %11.6f %11.6f' % (zoom, lat_min, lon_min, lat_max, lon_max))
# -- Image and drawing helpers -----------------------------------------------
def create_image_from_blob(blob):
# blob is a string containing an entire image file
if sys.version_info < (3,):
return Image.open(StringIO.StringIO(blob))
else:
return Image.open(io.BytesIO(blob))
def create_blob_from_image(img, format, jpeg_quality=85):
# img is a PIL image
# return buffer of image with requested format
# result is a buffer object or a bytes-like object
if sys.version_info < (3,):
stringIO = StringIO.StringIO()
save_image(img, stringIO, format, jpeg_quality)
return buffer(stringIO.getvalue())
else:
bytesIO = io.BytesIO()
save_image(img, bytesIO, format, jpeg_quality)
return bytesIO.getvalue()
def save_image(img, target, format, jpeg_quality=85):
# img is a PIL image
# target is filename or StringIO/BytesIO
if format == 'JPG':
save_image_to_jpg(img, target, jpeg_quality)
elif format == 'PNG':
save_image_to_png8(img, target)
else:
error('image format %s is not handled' % format)
def save_image_to_jpg(img, target, jpeg_quality=85):
img.convert('RGB').save(target, 'JPEG', optimize=True, quality=jpeg_quality)
def save_image_to_png(img, target):
img.save(target, 'PNG', optimize=True)
def save_image_to_png8(img, target):
# convert('RGB').convert('P') seems necessary
img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=256)
img.save(target, 'PNG', colors=256)
def save_image_to_png4(img, target):
# convert('RGB').convert('P') seems necessary
img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=16)
img.save(target, 'PNG', colors=16)
def draw_alpha_border(tile, color):
# V1
def draw_alpha_border1(tile, color):
draw = ImageDraw.Draw(tile, mode='RGBA')
draw.rectangle((0, 0, tile.size[0]-1, tile.size[1]-1), outline=color)
return tile
# V2
def draw_alpha_border2(tile, color):
# draw square with border color
border = Image.new('RGB', tile.size, color)
# draw mask with border color and alpha
mask = Image.new('RGBA', tile.size, (0,0,0))
draw = ImageDraw.Draw(mask, mode='RGBA')
draw.rectangle((0, 0, tile.size[0]-1, tile.size[1]-1), outline=color)
return Image.composite(tile, border, mask)
return draw_alpha_border2(tile, color)
def draw_alpha_text(tile, text, color):
# draw square with text color
border = Image.new('RGB', tile.size, color)
# draw mask with text color and alpha
mask = Image.new('RGBA', tile.size, (0,0,0))
draw = ImageDraw.Draw(mask, mode='RGBA')
draw.text((2, 0), text, color)
return Image.composite(tile, border, mask)
def draw_tile_width(x, y, zoom, tile, color):
w = tile_distance_km(x, y, x + 1, y, zoom)
if w < 10:
dec = 3
elif w < 1000:
dec = 1
else:
dec = 0
return draw_alpha_text(tile, '%.*f' % (dec, w), color)
def resize_image(options, img, width):
if options.view.antialias is False:
img = img.resize((width, width), Image.NEAREST)
else:
img = img.convert('RGB')
img = img.resize((width, width), Image.ANTIALIAS)
return img
def upper_tile_image(db, x, y, zoom):
tile = db.upper_tile(x, y, zoom)
if tile is None:
return None
else:
ux, uy, uz = tile
exists, date, img = db.retrieve(ux, uy, uz)
# compute coordinates
scale = 2 ** (zoom - uz)
        w = 256 // scale
        x2 = (x % scale) * w
        y2 = (y % scale) * w
# crop sub-image
subimg = img.crop((x2, y2, x2 + w, y2 + w))
# scale sub-image to tile image
newimg = subimg.resize((256, 256), Image.NEAREST)
# done
return newimg
def draw_tracks(options, draw, source, x0, y0, zoom, tile_width):
segments = track_segments(source, zoom, options)
if options.track:
for index, segment in enumerate(segments[:-1]):
next = index + 1
segment.append(segments[next][0])
elif options.tracks:
pass
elif options.contour:
nseg = len(segments)
for index, segment in enumerate(segments):
next = (index + 1) % nseg
segment.append(segments[next][0])
elif options.contours:
for segment in segments:
segment.append(segment[0])
elif options.project:
        # does not link segments in project mode, though it should for consistency
pass
else:
return
fill = options.tiles.track_color[0:3]
width = options.tiles.track_color[3]
for segment in segments:
seg = ((int((x - x0) * tile_width), int((y - y0) * tile_width)) for x, y in segment)
draw.line(sum(seg, ()), fill=fill, width=width)
# -- Main --------------------------------------------------------------------
def kahelo(argstring=None):
try:
start = time()
options = ArgumentParser().parse_args(argstring)
read_config(options)
options.start_time = start
r = apply_command(options)
return r
except KeyboardInterrupt:
print('\n** Interrupted by user.\n')
except CustomException:
pass
finally:
pass
if __name__ == "__main__":
kahelo()
# --
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_plcu.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_plcu.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum_plcu import constants
from electrum_plcu.i18n import _
from electrum_plcu.plugin import Device, runs_in_hwd_thread
from electrum_plcu.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_plcu.keystore import Hardware_KeyStore
from electrum_plcu.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_plcu.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException,
get_xpubs_and_der_suffixes_from_txinout)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
Capability, BackupType, RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
from trezorlib.client import PASSPHRASE_ON_DEVICE
TREZORLIB = True
except Exception as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'trezorlib'):
_logger.exception('error importing trezor plugin deps')
TREZORLIB = False
class _EnumMissing:
def __init__(self):
self.counter = 0
self.values = {}
def __getattr__(self, key):
if key not in self.values:
self.values[key] = self.counter
self.counter += 1
return self.values[key]
Capability = _EnumMissing()
BackupType = _EnumMissing()
RecoveryDeviceType = _EnumMissing()
PASSPHRASE_ON_DEVICE = object()
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
plugin: 'TrezorPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
backup_type: int = BackupType.Bip39
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://pypi.org/project/trezor/'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 12, 0)
maximum_library = (0, 13)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
self._is_bridge_available = None
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
@runs_in_hwd_thread
def is_bridge_available(self) -> bool:
# Testing whether the Bridge is available can take several seconds
# (when it is not), as it is slow to timeout, hence we cache it.
if self._is_bridge_available is None:
try:
call_bridge("enumerate")
except Exception:
self._is_bridge_available = False
# never again try with Bridge due to slow timeout
BridgeTransport.ENABLED = False
else:
self._is_bridge_available = True
return self._is_bridge_available
@runs_in_hwd_thread
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
if self.is_bridge_available():
devices = BridgeTransport.enumerate()
else:
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['TrezorClientBase']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Plcultima"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.").format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RecoveryDeviceType.ScrambledWords:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 20: 128, 24: 256, 33: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
backup_type=settings.backup_type,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RecoveryDeviceType.Matrix:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub('m', 'standard', creating=is_creating_wallet))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx) for txhash, tx in prev_tx.items()}
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'TrezorKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_trezor_input_script_type(txin.script_type)
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n = full_path
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'TrezorKeyStore'):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
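    # Illustration of the change handling above (a sketch, not from the
    # original sources): for a transaction with one payment to a foreign
    # address and one change output on the wallet's change branch,
    # any_output_on_change_branch is True, so only the change output goes
    # through create_output_by_derivation (and can be hidden by the device),
    # while the payment is passed by address.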
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
t.inputs = self.tx_inputs(tx)
t.bin_outputs = [
TxOutputBinType(amount=o.value, script_pubkey=o.scriptpubkey)
for o in tx.outputs()
]
return t
|
controlbox.py
|
import matplotlib.pyplot as plt
# import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import PySimpleGUI as sg
import matplotlib
import serial as ser
from serial import SerialException
from time import sleep
import glob
import os
# from pathlib import Path
from threading import Thread
from configparser import ConfigParser
import sys
import signal
import time
import datetime
def command(l):
print('SEND: '+l)
s.write(l.encode('utf-8'))
s.write(b'\r\n')
return receive(1)
def send(l):
s.write(l.encode('utf-8'))
#s.write(b'\r\n')
def receive(end):  ## 'end' is the line that tells the reader loop to stop
global busy, recibido
while True:
line = s.readline().decode()
recibido += line
if line.strip() == end:
busy = False
with open(file,'w') as f:
for i in recibido.splitlines():
if len(i)>1:
f.write(i.replace('.',',')+'\n')
f.close()
return
def update(): # send elon & force values to Arduino
global recibido, MaximumElongation, MaxPushForce
send('L'+str(MaximumElongation))
line = s.readline().decode()
recibido += line
window['box'].update(recibido)
send('F'+str(MaxPushForce))
line = s.readline().decode()
recibido += line
window['box'].update(recibido)
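# Serial protocol as used by this GUI (inferred from the event handlers below
# and from send()/receive() above, not from the Arduino firmware itself):
#   'L<mm>'  set maximum displacement      'F<N>'  set maximum force
#   'S'      start an automatic test       'M'     start a manual test
#   '?'      request a single measurement  'X'     emergency stop
# The Arduino answers 'ready' after the port is opened; during a test it
# streams one measurement per line and terminates with a line containing
# only '.', which is the end marker receive() waits for.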
config = ConfigParser()
found = config.read('defaults.ini')
if len(found):
# Read config parameter from INI file
print("INI file: "+str(found[0]))
port = config.get('SerialPort','COM')
ComSpeed = config.getint('SerialPort','BaudRate')
CellScale = config.getfloat('General','CellScale')
MaxPushForce = config.getfloat('General','MaxPushForce')
MaximumElongation = config.getfloat('General','MaximumElongation')
DataDir = config.get('General','DataDir')
layout=[[sg.Text("Serial Port to Arduino:"), sg.Input(port, size=(25, 1), enable_events=True, key="Port"), sg.Button('Connect'),sg.Button('Disconnect')],
[sg.Text('MaxDisplacement (mm)'), sg.Input(MaximumElongation,size=(5,1),key="Elon"), sg.Text('MaxForce (N)'),sg.Input(MaxPushForce,size=(5,1),key="Force"), sg.Button('Set') ],
[sg.Button('Start'), sg.Button('ResetCell'),sg.Button('ManualMeasurement'), sg.Button('STOP',button_color=(None,'red'))],
[sg.Button('StartManualTest'), sg.Text('motor disabled')],
[sg.Multiline('Last measures',size=(40,10),key='box', autoscroll=True,)]]
window = sg.Window('Push Device Control',layout, finalize=True)
window['Disconnect'].update(disabled=True)
recibido='Last measurements\n'
connected = False
busy = False
while True:
if not busy:
windows, event, values = sg.read_all_windows()
else:
windows, event, values = sg.read_all_windows(timeout=200)
window['box'].update(recibido) #values['box']+"Line")
if not connected and event == 'Connect': #################CONNECT!!!!!!!
connected = True
window['Disconnect'].update(disabled=False)
window['Connect'].update(disabled=True)
try:
port = values['Port']
s = ser.Serial(port, baudrate=ComSpeed, timeout=2)
        except SerialException:
            print("ERROR Opening the Serial Port: " + values['Port'])
            connected = False
            event = 'Exit'
            break
#sleep(1)
ok = False
for i in range(3):
line=s.readline().strip()
# print(line)
if line == b'ready':
ok = True
break
if not ok:
print('NOT CONNECTED')
event='Exit'
s.close()
break
recibido = 'CONNECTED\n'
window['box'].update(recibido)
update()
if connected and event == 'Disconnect': #######DISCONNECT
connected = False
window['Connect'].update(disabled=False)
window['Disconnect'].update(disabled=True)
s.close()
if event == sg.WIN_CLOSED or event == 'Exit': break
if connected and event == 'STOP': send('X')
if connected and event == 'Start':
file = sg.popup_get_file('Filename to store test data:', save_as = True)
recibido = ''
busy = True
send('S')
        thread = Thread(target=receive, args=('.',))
thread.start()
if connected and event == 'ManualMeasurement':
send('?')
line = s.readline().decode()
recibido += line
window['box'].update(recibido)
if event == 'Set':
MaximumElongation = values['Elon']
MaxPushForce = values['Force']
if connected: # if connected then push the values to the Arduino
update()
config['General']['MaxPushForce'] = MaxPushForce
config['General']['MaximumElongation'] = MaximumElongation
config['SerialPort']['COM'] = values['Port']
with open('defaults.ini', 'w') as configfile:
config.write(configfile)
if connected and event == 'StartManualTest':
file = sg.popup_get_file('Filename to store test data:', save_as = True)
recibido = ''
busy = True
send('M')
        thread = Thread(target=receive, args=('.',))
thread.start()
window.close()
if connected:
    s.close()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
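# Endianness helpers, illustrated (a sketch, not part of the original file):
# the getwork 'data' field is a hex string of little-endian 32-bit words.
# bufreverse() swaps the bytes inside each 4-byte word (on a little-endian
# host, bufreverse('\x01\x02\x03\x04') == '\x04\x03\x02\x01') and
# wordreverse() reverses the word order; applied to the double-SHA256 digest
# below, they yield the big-endian value that is compared against the target.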
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
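# The CONFIG-FILE parsed below is a plain key=value file; lines starting with
# '#' are skipped. A minimal illustrative example (credentials are
# placeholders):
#   host = 127.0.0.1
#   port = 24776
#   rpcuser = someuser
#   rpcpass = somepass
#   threads = 4
#   hashmeter = 1
#   scantime = 30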
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 24776
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
logMeteo_IR.py
|
#!/usr/bin/env python
import board
import requests
import argparse
import busio
import time
import datetime
import adafruit_bme280
import threading
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import subprocess
import logging
import socket
from systemd import journal
""" blink the LED for a bit"""
def blinkLED():
blinkTime = 0.1
for i in range(2):
GPIO.output(pinID, GPIO.HIGH) # Turn on
time.sleep(blinkTime) # Sleep
GPIO.output(pinID, GPIO.LOW) # Turn off
time.sleep(blinkTime) # Sleep
def errorLED():
blinkTime = 0.05
for i in range(50):
GPIO.output(pinID, GPIO.HIGH) # Turn on
time.sleep(blinkTime) # Sleep
GPIO.output(pinID, GPIO.LOW) # Turn off
time.sleep(blinkTime) # Sleep
class logBufferClass():
def __init__(self, debug=False):
self.filename = "/var/log/logbuffer.tmp"
if debug: self.filename="logbuffer.tmp"
self.logData = []
self.uploadDestination = "https://www.astrofarm.eu/upload"
if debug: self.uploadDestination = "http://astrofarm.local:3001/upload"
def addEntry(self, logLine):
self.logData.append(logLine)
self.dump()
return len(self.logData)
def load(self):
loadFile = open(self.filename, "r")
data = loadFile.readlines()
for d in data:
self.logData.append(d.strip())
loadFile.close()
def dump(self):
dumpFile = open(self.filename, "wt")
for item in self.logData:
dumpFile.write(item + "\n")
dumpFile.close()
def clear(self):
self.logData = []
self.dump()
def upload(self):
print("Uploading logBuffer")
success = False
print("Uploading to ", self.uploadDestination)
myobj = {'logData': self.logData}
print(myobj)
try:
x = requests.post(self.uploadDestination, data = myobj)
print(x.text)
if x.text == "SUCCESS": success = True
self.clear()
except Exception as e:
success = False
print(e)
return success
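# Illustrative use of logBufferClass (a sketch; the hostname and readings are
# made up). addEntry() appends a record and persists the buffer to the temp
# file; upload() POSTs the whole buffer, clears it when the request does not
# raise, and returns True only when the server answers "SUCCESS". The record
# format matches the main loop below:
# time|hostname|temperature|humidity|pressure|cpuTemp|IRambient|IRsky
#   buf = logBufferClass(debug=True)
#   buf.addEntry("2022-01-01 00:00:00|myhost|10.0|55.0|1013.2|45.1|8.0|-20.0")
#   if not buf.upload():
#       print("kept entries for a later retry")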
def getIRSky():
readCommand = ["/home/pi/share/meteopi/readTsky"]
result = subprocess.run(readCommand, stdout=subprocess.PIPE)
Tsky = float(result.stdout.decode('utf-8'))
return Tsky
def getIRAmbient():
readCommand = ["/home/pi/share/meteopi/readTamb"]
result = subprocess.run(readCommand, stdout=subprocess.PIPE)
Tamb = float(result.stdout.decode('utf-8'))
return Tamb
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Polls the BME 280 sensor for temperature, pressure and relative humidity.')
parser.add_argument('-c', '--cadence', type=int, default=60, help='Cadence in seconds.' )
parser.add_argument('-u', '--upload', type=int, default=300, help='Upload cadence in seconds.' )
parser.add_argument('-s', '--service', action="store_true", default=False, help='Specify this option if running as a service.' )
args = parser.parse_args()
debug = False
    GPIO.setwarnings(True)   # Keep GPIO warnings enabled (set to False to silence them)
GPIO.setmode(GPIO.BCM) # Use BCM pin numbering
pinID = 16
cadence = args.cadence
    GPIO.setup(pinID, GPIO.OUT, initial=GPIO.HIGH)  # Configure the LED pin as an output, initially HIGH
i2c = busio.I2C(board.SCL, board.SDA)
bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c, address = 0x77)
logBuffer = []
lastUpload = datetime.datetime.now()
uploadCadence = args.upload
cpuTempPath = "/sys/class/thermal/thermal_zone0/temp"
if args.service:
log = logging.getLogger('logmeteo.service')
log.addHandler(journal.JournaldLogHandler())
log.setLevel(logging.INFO)
logLine = "Starting the logmeteo service with a cadence of %d seconds"%cadence
log.info(logLine)
try:
logFile = open("/var/log/meteo.log", "at")
except PermissionError:
logFile = open("debug.log", "at")
print("Not running as root, so can't write to /var/log. Creating a local 'debug.log' file.")
debug = True
print("Creating logBuffer")
logBuffer = logBufferClass(debug=debug)
print("Finished creating logBuffer")
logBuffer.load()
IRdisabled = False
errorFlash = None
while True:
currentTime = datetime.datetime.now()
try:
# Get the CPU temperatures
CPUtempFile = open(cpuTempPath, "rt")
for line in CPUtempFile:
cpuTemp = float(line.strip())
CPUtempFile.close()
# Get the hostname of this device
hostname = socket.gethostname()
# Get the IR detector temperatures
if not IRdisabled:
try:
IRsky = getIRSky()
IRambient = getIRAmbient()
except:
IRdisabled = True
if IRdisabled:
IRsky = -100
IRambient = -100
logLine = "%s|%s|%0.1f|%0.1f|%0.1f|%0.1f|%0.1f|%0.1f"%(str(currentTime), hostname, bme280.temperature, bme280.humidity, bme280.pressure, cpuTemp/1000, IRambient, IRsky)
logBuffer.addEntry(logLine)
if args.service: log.info(logLine)
timeSinceUpload = currentTime - lastUpload
if debug: print("time since last upload %d seconds"%timeSinceUpload.seconds)
if timeSinceUpload.seconds > uploadCadence:
if logBuffer.upload():
lastUpload = datetime.datetime.now()
if args.service: log.info("Uploaded data successfully")
else:
lastUpload = datetime.datetime.now()
print("Upload failed! Will try again in %d seconds"%uploadCadence)
if args.service: log.error("Failed to upload data. Will try again in %d seconds."%uploadCadence)
t = threading.Thread(name='non-block', target=blinkLED)
t.start()
logFile.write(logLine + "\n")
if debug: print(logLine.strip())
logFile.flush()
except OSError as e:
if debug:
print("Error connecting to sensor")
print(e)
errorFlash = threading.Thread(name='error flash', target=errorLED)
errorFlash.start()
time.sleep(cadence)
|
proxy.py
|
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
import flask
import sys
import logging
import time
import threading
import json
import subprocess as sp
from gevent.pywsgi import WSGIServer
from lithops.constants import LITHOPS_TEMP_DIR, JOBS_DONE_DIR, \
REMOTE_INSTALL_DIR, PX_LOG_FILE, LOGS_DIR
from lithops.storage.utils import create_job_key
from lithops.localhost.localhost import LocalhostHandler
from lithops.standalone.standalone import StandaloneHandler
from lithops import constants
from lithops.utils import verify_runtime_name
os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
os.makedirs(LOGS_DIR, exist_ok=True)
log_file_fd = open(PX_LOG_FILE, 'a')
sys.stdout = log_file_fd
sys.stderr = log_file_fd
logging.basicConfig(filename=PX_LOG_FILE, level=logging.INFO,
format=constants.LOGGER_FORMAT)
logger = logging.getLogger('proxy')
proxy = flask.Flask(__name__)
last_usage_time = time.time()
keeper = None
jobs = {}
backend_handler = None
def budget_keeper():
global last_usage_time
global jobs
global backend_handler
global backend_handler_backend
jobs_running = False
logger.info("BudgetKeeper started")
if backend_handler.auto_dismantle:
logger.info('Auto dismantle activated - Soft timeout: {}s, Hard Timeout: {}s'
.format(backend_handler.soft_dismantle_timeout,
backend_handler.hard_dismantle_timeout))
else:
        # If auto_dismantle is deactivated, the VM will still be stopped
        # automatically after hard_dismantle_timeout. This prevents the VM
        # from running forever due to a wrong configuration.
logger.info('Auto dismantle deactivated - Hard Timeout: {}s'
.format(backend_handler.hard_dismantle_timeout))
logger.info("Jobs keys are {}".format(jobs.keys()))
while True:
time_since_last_usage = time.time() - last_usage_time
check_interval = backend_handler.soft_dismantle_timeout / 10
for job_key in jobs.keys():
done = os.path.join(JOBS_DONE_DIR, job_key+'.done')
if os.path.isfile(done):
jobs[job_key] = 'done'
if len(jobs) > 0 and all(value == 'done' for value in jobs.values()) \
and backend_handler.auto_dismantle:
            # Here we need to catch the moment when the number of running jobs
            # becomes zero; when that happens we reset the countdown back to
            # soft_dismantle_timeout.
if jobs_running:
jobs_running = False
last_usage_time = time.time()
time_since_last_usage = time.time() - last_usage_time
time_to_dismantle = int(backend_handler.soft_dismantle_timeout - time_since_last_usage)
else:
time_to_dismantle = int(backend_handler.hard_dismantle_timeout - time_since_last_usage)
jobs_running = True
if time_to_dismantle > 0:
logger.info("Time to dismantle: {} seconds".format(time_to_dismantle))
time.sleep(check_interval)
else:
logger.info("Dismantling setup")
try:
backend_handler_backend.stop()
except Exception as e:
logger.info("Dismantle error {}".format(e))
def init_keeper():
global keeper
global backend_handler
global backend_handler_backend
config_file = os.path.join(REMOTE_INSTALL_DIR, 'config')
with open(config_file, 'r') as cf:
standalone_config = json.load(cf)
backend_handler = StandaloneHandler(standalone_config)
access_data = os.path.join(REMOTE_INSTALL_DIR, 'access.data')
with open(access_data, 'r') as ad:
lines = ad.readlines()
for line in lines:
res = line.strip().split()
break
logger.info("Parsed self IP {} and instance ID {}".format(res[0], res[1]))
backend_handler_backend = backend_handler.create_backend_handler(res[1], res[0])
keeper = threading.Thread(target=budget_keeper)
keeper.daemon = True
keeper.start()
def error(msg):
response = flask.jsonify({'error': msg})
response.status_code = 404
return response
@proxy.route('/run', methods=['POST'])
def run():
"""
Run a job
"""
global last_usage_time
global backend_handler
global jobs
message = flask.request.get_json(force=True, silent=True)
if message and not isinstance(message, dict):
return error('The action did not receive a dictionary as an argument.')
try:
runtime = message['job_description']['runtime_name']
verify_runtime_name(runtime)
except Exception as e:
return error(str(e))
last_usage_time = time.time()
standalone_config = message['config']['standalone']
backend_handler.auto_dismantle = standalone_config['auto_dismantle']
backend_handler.soft_dismantle_timeout = standalone_config['soft_dismantle_timeout']
backend_handler.hard_dismantle_timeout = standalone_config['hard_dismantle_timeout']
act_id = str(uuid.uuid4()).replace('-', '')[:12]
executor_id = message['executor_id']
job_id = message['job_id']
job_key = create_job_key(executor_id, job_id)
jobs[job_key] = 'running'
localhost_handler = LocalhostHandler({'runtime': runtime})
localhost_handler.run_job(message)
response = flask.jsonify({'activationId': act_id})
response.status_code = 202
return response
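# Illustrative JSON body for POST /run (a sketch based on the fields read
# above; the IDs and runtime name are placeholders):
#   {
#     "executor_id": "a1b2c3-0",
#     "job_id": "A000",
#     "config": {"standalone": {"auto_dismantle": true,
#                               "soft_dismantle_timeout": 300,
#                               "hard_dismantle_timeout": 3600}},
#     "job_description": {"runtime_name": "python3.8"}
#   }
# A successful call answers 202 with {"activationId": "<12 hex chars>"}.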
@proxy.route('/ping', methods=['GET'])
def ping():
response = flask.jsonify({'response': 'pong'})
response.status_code = 200
return response
@proxy.route('/preinstalls', methods=['GET'])
def preinstalls():
message = flask.request.get_json(force=True, silent=True)
if message and not isinstance(message, dict):
return error('The action did not receive a dictionary as an argument.')
try:
runtime = message['runtime']
verify_runtime_name(runtime)
except Exception as e:
return error(str(e))
localhost_handler = LocalhostHandler(message)
runtime_meta = localhost_handler.create_runtime(runtime)
response = flask.jsonify(runtime_meta)
response.status_code = 200
return response
def install_environment():
"""
Install docker command and Python deps in case they are not installed.
Only for Ubuntu-based OS
"""
os_version = sp.check_output('uname -a', shell=True).decode()
if 'Ubuntu' in os_version:
try:
sp.check_output('docker ps > /dev/null 2>&1', shell=True)
docker_installed = True
logger.info("Environment already installed")
except Exception:
logger.info("Environment not installed")
docker_installed = False
if not docker_installed:
            # If docker is not installed, nothing is installed, so let's install everything here
cmd = 'apt-get remove docker docker-engine docker.io containerd runc -y; '
cmd += 'apt-get update '
cmd += '&& apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common -y '
cmd += '&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - > /dev/null 2>&1 '
cmd += '&& add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" '
cmd += '&& apt-get update '
cmd += '&& apt-get install docker-ce docker-ce-cli containerd.io -y '
try:
logger.info("Installing Docker...")
with open(PX_LOG_FILE, 'a') as lf:
sp.run(cmd, shell=True, stdout=lf, stderr=lf, universal_newlines=True)
logger.info("Docker installed successfully")
except Exception as e:
logger.info("There was an error installing Docker: {}".format(e))
cmd = 'pip3 install -U lithops'
try:
logger.info("Installing python packages...")
with open(PX_LOG_FILE, 'a') as lf:
sp.run(cmd, shell=True, stdout=lf, stderr=lf, universal_newlines=True)
logger.info("Python packages installed successfully")
except Exception as e:
logger.info("There was an error installing the python packages: {}".format(e))
else:
logger.info("Linux images different from Ubuntu do not support automatic environment installation")
def main():
install_environment()
init_keeper()
port = int(os.getenv('PORT', 8080))
server = WSGIServer(('127.0.0.1', port), proxy, log=proxy.logger)
server.serve_forever()
if __name__ == '__main__':
main()
|
__init__.py
|
"""
Base classes for job runner plugins.
"""
import os
import time
import string
import logging
import datetime
import threading
import subprocess
from Queue import Queue, Empty
import galaxy.jobs
from galaxy.jobs.command_factory import build_command
from galaxy import model
from galaxy.util import DATABASE_MAX_STRING_SIZE, shrink_stream_by_size
from galaxy.util import in_directory
from galaxy.util import ParamsWithSpecs
from galaxy.util import ExecutionTimer
from galaxy.util.bunch import Bunch
from galaxy.jobs.runners.util.job_script import write_script
from galaxy.jobs.runners.util.job_script import job_script
from galaxy.jobs.runners.util.env import env_to_statement
from .state_handler_factory import build_state_handlers
log = logging.getLogger( __name__ )
STOP_SIGNAL = object()
JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE = "Invalid job runner parameter for this plugin: %s"
JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE = "Job runner parameter '%s' value '%s' could not be converted to the correct type"
JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE = "Job runner parameter %s failed validation"
GALAXY_LIB_ADJUST_TEMPLATE = """GALAXY_LIB="%s"; if [ "$GALAXY_LIB" != "None" ]; then if [ -n "$PYTHONPATH" ]; then PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"; else PYTHONPATH="$GALAXY_LIB"; fi; export PYTHONPATH; fi;"""
GALAXY_VENV_TEMPLATE = """GALAXY_VIRTUAL_ENV="%s"; if [ "$GALAXY_VIRTUAL_ENV" != "None" -a -z "$VIRTUAL_ENV" -a -f "$GALAXY_VIRTUAL_ENV/bin/activate" ]; then . "$GALAXY_VIRTUAL_ENV/bin/activate"; fi;"""
class RunnerParams( ParamsWithSpecs ):
def _param_unknown_error( self, name ):
raise Exception( JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE % name )
def _param_map_error( self, name, value ):
raise Exception( JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE % ( name, value ) )
def _param_vaildation_error( self, name, value ):
raise Exception( JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE % name )
class BaseJobRunner( object ):
DEFAULT_SPECS = dict( recheck_missing_job_retries=dict( map=int, valid=lambda x: x >= 0, default=0 ) )
def __init__( self, app, nworkers, **kwargs ):
"""Start the job runner
"""
self.app = app
self.sa_session = app.model.context
self.nworkers = nworkers
runner_param_specs = self.DEFAULT_SPECS.copy()
if 'runner_param_specs' in kwargs:
runner_param_specs.update( kwargs.pop( 'runner_param_specs' ) )
if kwargs:
log.debug( 'Loading %s with params: %s', self.runner_name, kwargs )
self.runner_params = RunnerParams( specs=runner_param_specs, params=kwargs )
self.runner_state_handlers = build_state_handlers()
def _init_worker_threads(self):
"""Start ``nworkers`` worker threads.
"""
self.work_queue = Queue()
self.work_threads = []
log.debug('Starting %s %s workers' % (self.nworkers, self.runner_name))
for i in range(self.nworkers):
worker = threading.Thread( name="%s.work_thread-%d" % (self.runner_name, i), target=self.run_next )
worker.setDaemon( True )
worker.start()
self.work_threads.append( worker )
def run_next(self):
"""Run the next item in the work queue (a job waiting to run)
"""
while True:
( method, arg ) = self.work_queue.get()
if method is STOP_SIGNAL:
return
# id and name are collected first so that the call of method() is the last exception.
try:
if isinstance(arg, AsynchronousJobState):
job_id = arg.job_wrapper.get_id_tag()
else:
# arg should be a JobWrapper/TaskWrapper
job_id = arg.get_id_tag()
except:
job_id = 'unknown'
try:
name = method.__name__
except:
name = 'unknown'
try:
method(arg)
except:
log.exception( "(%s) Unhandled exception calling %s" % ( job_id, name ) )
# Causes a runner's `queue_job` method to be called from a worker thread
def put(self, job_wrapper):
"""Add a job to the queue (by job identifier), indicate that the job is ready to run.
"""
put_timer = ExecutionTimer()
job = job_wrapper.get_job()
# Change to queued state before handing to worker thread so the runner won't pick it up again
job_wrapper.change_state( model.Job.states.QUEUED, flush=False, job=job )
# Persist the destination so that the job will be included in counts if using concurrency limits
job_wrapper.set_job_destination( job_wrapper.job_destination, None, flush=False, job=job )
self.sa_session.flush()
self.mark_as_queued(job_wrapper)
log.debug("Job [%s] queued %s" % (job_wrapper.job_id, put_timer))
def mark_as_queued(self, job_wrapper):
self.work_queue.put( ( self.queue_job, job_wrapper ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker threads
"""
log.info( "%s: Sending stop signal to %s worker threads" % ( self.runner_name, len( self.work_threads ) ) )
for i in range( len( self.work_threads ) ):
self.work_queue.put( ( STOP_SIGNAL, None ) )
# Most runners should override the legacy URL handler methods and destination param method
def url_to_destination(self, url):
"""
Convert a legacy URL to a JobDestination.
Job runner URLs are deprecated, JobDestinations should be used instead.
This base class method converts from a URL to a very basic
JobDestination without destination params.
"""
return galaxy.jobs.JobDestination(runner=url.split(':')[0])
def parse_destination_params(self, params):
"""Parse the JobDestination ``params`` dict and return the runner's native representation of those params.
"""
raise NotImplementedError()
def prepare_job(self, job_wrapper, include_metadata=False, include_work_dir_outputs=True):
"""Some sanity checks that all runners' queue_job() methods are likely to want to do
"""
job_id = job_wrapper.get_id_tag()
job_state = job_wrapper.get_state()
job_wrapper.is_ready = False
job_wrapper.runner_command_line = None
# Make sure the job hasn't been deleted
if job_state == model.Job.states.DELETED:
log.debug( "(%s) Job deleted by user before it entered the %s queue" % ( job_id, self.runner_name ) )
if self.app.config.cleanup_job in ( "always", "onsuccess" ):
job_wrapper.cleanup()
return False
elif job_state != model.Job.states.QUEUED:
log.info( "(%s) Job is in state %s, skipping execution" % ( job_id, job_state ) )
# cleanup may not be safe in all states
return False
# Prepare the job
try:
job_wrapper.prepare()
job_wrapper.runner_command_line = self.build_command_line(
job_wrapper,
include_metadata=include_metadata,
include_work_dir_outputs=include_work_dir_outputs,
)
except Exception as e:
log.exception("(%s) Failure preparing job" % job_id)
job_wrapper.fail( e.message if hasattr( e, 'message' ) else "Job preparation failed", exception=True )
return False
if not job_wrapper.runner_command_line:
job_wrapper.finish( '', '' )
return False
return True
# Runners must override the job handling methods
def queue_job(self, job_wrapper):
raise NotImplementedError()
def stop_job(self, job):
raise NotImplementedError()
def recover(self, job, job_wrapper):
raise NotImplementedError()
def build_command_line( self, job_wrapper, include_metadata=False, include_work_dir_outputs=True ):
container = self._find_container( job_wrapper )
return build_command(
self,
job_wrapper,
include_metadata=include_metadata,
include_work_dir_outputs=include_work_dir_outputs,
container=container
)
def get_work_dir_outputs( self, job_wrapper, job_working_directory=None ):
"""
Returns list of pairs (source_file, destination) describing path
to work_dir output file and ultimate destination.
"""
if not job_working_directory:
job_working_directory = os.path.abspath( job_wrapper.working_directory )
# Set up dict of dataset id --> output path; output path can be real or
# false depending on outputs_to_working_directory
output_paths = {}
for dataset_path in job_wrapper.get_output_fnames():
path = dataset_path.real_path
if self.app.config.outputs_to_working_directory:
path = dataset_path.false_path
output_paths[ dataset_path.dataset_id ] = path
output_pairs = []
# Walk job's output associations to find and use from_work_dir attributes.
job = job_wrapper.get_job()
job_tool = job_wrapper.tool
for (joda, dataset) in self._walk_dataset_outputs( job ):
if joda and job_tool:
hda_tool_output = job_tool.find_output_def( joda.name )
if hda_tool_output and hda_tool_output.from_work_dir:
# Copy from working dir to HDA.
# TODO: move instead of copy to save time?
source_file = os.path.join( job_working_directory, 'working', hda_tool_output.from_work_dir )
destination = job_wrapper.get_output_destination( output_paths[ dataset.dataset_id ] )
if in_directory( source_file, job_working_directory ):
output_pairs.append( ( source_file, destination ) )
else:
# Security violation.
log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
return output_pairs
def _walk_dataset_outputs( self, job ):
for dataset_assoc in job.output_datasets + job.output_library_datasets:
for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations:
if isinstance( dataset, self.app.model.HistoryDatasetAssociation ):
joda = self.sa_session.query( self.app.model.JobToOutputDatasetAssociation ).filter_by( job=job, dataset=dataset ).first()
yield (joda, dataset)
# TODO: why is this not just something easy like:
# for dataset_assoc in job.output_datasets + job.output_library_datasets:
# yield (dataset_assoc, dataset_assoc.dataset)
# I don't understand the reworking it backwards. -John
def _handle_metadata_externally( self, job_wrapper, resolve_requirements=False ):
"""
Set metadata externally. Used by the Pulsar job runner where this
shouldn't be attached to command line to execute.
"""
# run the metadata setting script here
# this is terminate-able when output dataset/job is deleted
# so that long running set_meta()s can be canceled without having to reboot the server
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
lib_adjust = GALAXY_LIB_ADJUST_TEMPLATE % job_wrapper.galaxy_lib_dir
venv = GALAXY_VENV_TEMPLATE % job_wrapper.galaxy_virtual_env
external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
set_extension=True,
tmp_dir=job_wrapper.working_directory,
# We don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
kwds={ 'overwrite' : False } )
external_metadata_script = "%s %s %s" % (lib_adjust, venv, external_metadata_script)
if resolve_requirements:
dependency_shell_commands = self.app.datatypes_registry.set_external_metadata_tool.build_dependency_shell_commands(job_directory=job_wrapper.working_directory)
if dependency_shell_commands:
if isinstance( dependency_shell_commands, list ):
dependency_shell_commands = "&&".join( dependency_shell_commands )
external_metadata_script = "%s&&%s" % ( dependency_shell_commands, external_metadata_script )
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
external_metadata_proc = subprocess.Popen( args=external_metadata_script,
shell=True,
cwd=job_wrapper.working_directory,
env=os.environ,
preexec_fn=os.setpgrp )
job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
external_metadata_proc.wait()
log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
def get_job_file(self, job_wrapper, **kwds):
job_metrics = job_wrapper.app.job_metrics
job_instrumenter = job_metrics.job_instrumenters[ job_wrapper.job_destination.id ]
env_setup_commands = kwds.get( 'env_setup_commands', [] )
env_setup_commands.append( job_wrapper.get_env_setup_clause() or '' )
destination = job_wrapper.job_destination or {}
envs = destination.get( "env", [] )
envs.extend( job_wrapper.environment_variables )
for env in envs:
env_setup_commands.append( env_to_statement( env ) )
command_line = job_wrapper.runner_command_line
options = dict(
job_instrumenter=job_instrumenter,
galaxy_lib=job_wrapper.galaxy_lib_dir,
galaxy_virtual_env=job_wrapper.galaxy_virtual_env,
env_setup_commands=env_setup_commands,
working_directory=os.path.abspath( job_wrapper.working_directory ),
command=command_line,
shell=job_wrapper.shell,
)
        # Additional logging to enable if debugging from_work_dir handling, metadata
        # commands, etc... (or just peek in the job script.)
job_id = job_wrapper.job_id
log.debug( '(%s) command is: %s' % ( job_id, command_line ) )
options.update(**kwds)
return job_script(**options)
def write_executable_script( self, path, contents, mode=0o755 ):
write_script( path, contents, self.app.config, mode=mode )
def _complete_terminal_job( self, ajs, **kwargs ):
if ajs.job_wrapper.get_state() != model.Job.states.DELETED:
self.work_queue.put( ( self.finish_job, ajs ) )
def _find_container(
self,
job_wrapper,
compute_working_directory=None,
compute_tool_directory=None,
compute_job_directory=None
):
if not compute_working_directory:
compute_working_directory = job_wrapper.tool_working_directory
if not compute_tool_directory:
compute_tool_directory = job_wrapper.tool.tool_dir
tool = job_wrapper.tool
from galaxy.tools.deps import containers
tool_info = containers.ToolInfo(tool.containers, tool.requirements)
job_info = containers.JobInfo(compute_working_directory, compute_tool_directory, compute_job_directory)
destination_info = job_wrapper.job_destination.params
return self.app.container_finder.find_container(
tool_info,
destination_info,
job_info
)
def _handle_runner_state( self, runner_state, job_state ):
try:
for handler in self.runner_state_handlers.get(runner_state, []):
handler(self.app, self, job_state)
if job_state.runner_state_handled:
break
except:
log.exception('Caught exception in runner state handler:')
def mark_as_resubmitted( self, job_state, info=None ):
job_state.job_wrapper.mark_as_resubmitted( info=info )
if not self.app.config.track_jobs_in_database:
job_state.job_wrapper.change_state( model.Job.states.QUEUED )
self.app.job_manager.job_handler.dispatcher.put( job_state.job_wrapper )
class JobState( object ):
"""
Encapsulate state of jobs.
"""
runner_states = Bunch(
WALLTIME_REACHED='walltime_reached',
MEMORY_LIMIT_REACHED='memory_limit_reached',
GLOBAL_WALLTIME_REACHED='global_walltime_reached',
OUTPUT_SIZE_LIMIT='output_size_limit'
)
def __init__( self ):
self.runner_state_handled = False
def set_defaults( self, files_dir ):
if self.job_wrapper is not None:
id_tag = self.job_wrapper.get_id_tag()
if files_dir is not None:
self.job_file = JobState.default_job_file( files_dir, id_tag )
self.output_file = os.path.join( files_dir, 'galaxy_%s.o' % id_tag )
self.error_file = os.path.join( files_dir, 'galaxy_%s.e' % id_tag )
self.exit_code_file = os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
job_name = 'g%s' % id_tag
if self.job_wrapper.tool.old_id:
job_name += '_%s' % self.job_wrapper.tool.old_id
if self.job_wrapper.user:
job_name += '_%s' % self.job_wrapper.user
self.job_name = ''.join( map( lambda x: x if x in ( string.letters + string.digits + '_' ) else '_', job_name ) )
@staticmethod
def default_job_file( files_dir, id_tag ):
return os.path.join( files_dir, 'galaxy_%s.sh' % id_tag )
@staticmethod
def default_exit_code_file( files_dir, id_tag ):
return os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
class AsynchronousJobState( JobState ):
"""
    Encapsulate the state of an asynchronous job. This should be subclassed as
    needed for various job runners to capture additional information needed
    to communicate with the distributed resource manager.
"""
def __init__( self, files_dir=None, job_wrapper=None, job_id=None, job_file=None, output_file=None, error_file=None, exit_code_file=None, job_name=None, job_destination=None ):
super( AsynchronousJobState, self ).__init__()
self.old_state = None
self._running = False
self.check_count = 0
self.start_time = None
self.job_wrapper = job_wrapper
# job_id is the DRM's job id, not the Galaxy job id
self.job_id = job_id
self.job_destination = job_destination
self.job_file = job_file
self.output_file = output_file
self.error_file = error_file
self.exit_code_file = exit_code_file
self.job_name = job_name
self.set_defaults( files_dir )
self.cleanup_file_attributes = [ 'job_file', 'output_file', 'error_file', 'exit_code_file' ]
@property
def running( self ):
return self._running
@running.setter
def running( self, is_running ):
self._running = is_running
# This will be invalid for job recovery
if self.start_time is None:
self.start_time = datetime.datetime.now()
def check_limits( self, runtime=None ):
limit_state = None
if self.job_wrapper.has_limits():
self.check_count += 1
if self.running and (self.check_count % 20 == 0):
if runtime is None:
runtime = datetime.datetime.now() - (self.start_time or datetime.datetime.now())
self.check_count = 0
limit_state = self.job_wrapper.check_limits( runtime=runtime )
if limit_state is not None:
# Set up the job for failure, but the runner will do the actual work
self.runner_state, self.fail_message = limit_state
self.stop_job = True
return True
return False
def cleanup( self ):
for file in [ getattr( self, a ) for a in self.cleanup_file_attributes if hasattr( self, a ) ]:
try:
os.unlink( file )
except Exception as e:
log.debug( "(%s/%s) Unable to cleanup %s: %s" % ( self.job_wrapper.get_id_tag(), self.job_id, file, str( e ) ) )
def register_cleanup_file_attribute( self, attribute ):
if attribute not in self.cleanup_file_attributes:
self.cleanup_file_attributes.append( attribute )
class AsynchronousJobRunner( BaseJobRunner ):
"""Parent class for any job runner that runs jobs asynchronously (e.g. via
a distributed resource manager). Provides general methods for having a
thread to monitor the state of asynchronous jobs and submitting those jobs
    to the correct methods (queue, finish, cleanup) at appropriate times.
"""
def __init__( self, app, nworkers, **kwargs ):
super( AsynchronousJobRunner, self ).__init__( app, nworkers, **kwargs )
# 'watched' and 'queue' are both used to keep track of jobs to watch.
# 'queue' is used to add new watched jobs, and can be called from
# any thread (usually by the 'queue_job' method). 'watched' must only
# be modified by the monitor thread, which will move items from 'queue'
# to 'watched' and then manage the watched jobs.
self.watched = []
self.monitor_queue = Queue()
def _init_monitor_thread(self):
self.monitor_thread = threading.Thread( name="%s.monitor_thread" % self.runner_name, target=self.monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
def handle_stop(self):
# DRMAA and SGE runners should override this and disconnect.
pass
def monitor( self ):
"""
Watches jobs currently in the monitor queue and deals with state
changes (queued to running) and job completion.
"""
while True:
# Take any new watched jobs and put them on the monitor list
try:
while True:
async_job_state = self.monitor_queue.get_nowait()
if async_job_state is STOP_SIGNAL:
# TODO: This is where any cleanup would occur
self.handle_stop()
return
self.watched.append( async_job_state )
except Empty:
pass
# Iterate over the list of watched jobs and check state
try:
self.check_watched_items()
except Exception:
log.exception('Unhandled exception checking active jobs')
# Sleep a bit before the next state check
time.sleep( 1 )
def monitor_job(self, job_state):
self.monitor_queue.put( job_state )
def shutdown( self ):
"""Attempts to gracefully shut down the monitor thread"""
log.info( "%s: Sending stop signal to monitor thread" % self.runner_name )
self.monitor_queue.put( STOP_SIGNAL )
# Call the parent's shutdown method to stop workers
super( AsynchronousJobRunner, self ).shutdown()
def check_watched_items(self):
"""
This method is responsible for iterating over self.watched and handling
state changes and updating self.watched with a new list of watched job
states. Subclasses can opt to override this directly (as older job runners will
initially) or just override check_watched_item and allow the list processing to
reuse the logic here.
"""
new_watched = []
for async_job_state in self.watched:
new_async_job_state = self.check_watched_item(async_job_state)
if new_async_job_state:
new_watched.append(new_async_job_state)
self.watched = new_watched
    # Subclasses should implement this unless they override check_watched_items altogether.
def check_watched_item(self, job_state):
raise NotImplementedError()
def finish_job( self, job_state ):
"""
Get the output/error for a finished job, pass to `job_wrapper.finish`
and cleanup all the job's temporary files.
"""
galaxy_id_tag = job_state.job_wrapper.get_id_tag()
external_job_id = job_state.job_id
# To ensure that files below are readable, ownership must be reclaimed first
job_state.job_wrapper.reclaim_ownership()
# wait for the files to appear
which_try = 0
while which_try < (self.app.config.retry_job_output_collection + 1):
try:
stdout = shrink_stream_by_size( file( job_state.output_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
stderr = shrink_stream_by_size( file( job_state.error_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
which_try = (self.app.config.retry_job_output_collection + 1)
except Exception as e:
if which_try == self.app.config.retry_job_output_collection:
stdout = ''
stderr = 'Job output not returned from cluster'
log.error( '(%s/%s) %s: %s' % ( galaxy_id_tag, external_job_id, stderr, str( e ) ) )
else:
time.sleep(1)
which_try += 1
try:
# This should be an 8-bit exit code, but read ahead anyway:
exit_code_str = file( job_state.exit_code_file, "r" ).read(32)
except:
# By default, the exit code is 0, which typically indicates success.
exit_code_str = "0"
try:
# Decode the exit code. If it's bogus, then just use 0.
exit_code = int(exit_code_str)
except:
log.warning( "(%s/%s) Exit code '%s' invalid. Using 0." % ( galaxy_id_tag, external_job_id, exit_code_str ) )
exit_code = 0
# clean up the job files
cleanup_job = job_state.job_wrapper.cleanup_job
if cleanup_job == "always" or ( not stderr and cleanup_job == "onsuccess" ):
job_state.cleanup()
try:
job_state.job_wrapper.finish( stdout, stderr, exit_code )
except:
log.exception( "(%s/%s) Job wrapper finish method failed" % ( galaxy_id_tag, external_job_id ) )
job_state.job_wrapper.fail( "Unable to finish job", exception=True )
def fail_job( self, job_state ):
if getattr( job_state, 'stop_job', True ):
self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )
self._handle_runner_state( 'failure', job_state )
# Not convinced this is the best way to indicate this state, but
# something necessary
if not job_state.runner_state_handled:
job_state.job_wrapper.fail( getattr( job_state, 'fail_message', 'Job failed' ) )
if job_state.job_wrapper.cleanup_job == "always":
job_state.cleanup()
def mark_as_finished(self, job_state):
self.work_queue.put( ( self.finish_job, job_state ) )
def mark_as_failed(self, job_state):
self.work_queue.put( ( self.fail_job, job_state ) )
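# A minimal sketch of how a concrete plugin typically builds on the classes
# above (the class name and the submission details are illustrative, not part
# of this module):
#   class ExampleJobRunner( AsynchronousJobRunner ):
#       runner_name = "ExampleRunner"
#       def __init__( self, app, nworkers, **kwargs ):
#           super( ExampleJobRunner, self ).__init__( app, nworkers, **kwargs )
#           self._init_monitor_thread()
#           self._init_worker_threads()
#       def queue_job( self, job_wrapper ):
#           if not self.prepare_job( job_wrapper ):
#               return
#           ajs = AsynchronousJobState( files_dir=job_wrapper.working_directory,
#                                       job_wrapper=job_wrapper )
#           # submit job_wrapper.runner_command_line to the resource manager
#           # here, record its external id in ajs.job_id, then watch it
#           self.monitor_job( ajs )
#       def check_watched_item( self, job_state ):
#           # poll the resource manager; on completion call
#           # self.mark_as_finished( job_state ) and return None
#           return job_state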
|
scan_latex_sources.py
|
#! /usr/bin/python
# Author: Marcel Simader (marcel.simader@jku.at)
# Date: 18.02.2022
# (c) Marcel Simader 2022, Johannes Kepler Universität Linz
from typing import Union, List, Optional, Iterator
from io import TextIOBase
import os, sys
import argparse, inspect, pathlib, subprocess, re, shutil, ctypes, glob
import queue, threading, time
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~ GLOBALS ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
DESC_STR = r"Scan LaTeX sources for definitions (e.g. '\newcommand')."
OUT_IO = sys.stdout
NUM_THREADS = 8
assert NUM_THREADS > 1
THREAD_TIMEOUT = 0.2
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~ INTERNAL GLOBALS ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
File = Union[str, TextIOBase]
# ~~~~~~~~~~~~~~~~~~~~ OBJECTS ~~~~~~~~~~~~~~~~~~~~
files, messages = queue.Queue(), queue.Queue()
included_files = set()
LOG_FILE_REGEX = re.compile(r"\(([-_/\.\w]+)\nPackage", re.MULTILINE)
LEGAL_FILE_EXTENSIONS = (".tex", ".latex", ".sty", ".cls", ".log",)
# ~~~~~~~~~~~~~~~~~~~~ CLASSES ~~~~~~~~~~~~~~~~~~~~
class Element:
prefix = "?Replace the Prefix?"
def __init__(self, file):
self.file = norm_class(file)
def __str__(self) -> str:
return f"{self.prefix} {self.file}"
class RegexElement(Element):
comment = re.compile(r"%.*$")
@classmethod
def assert_len(cls, obj, length: int) -> None:
if len(obj) != length:
err(f"Object '{obj}' is not of expected length '{length}'")
sys.exit(1)
@classmethod
def match(cls, file, text: str) -> Iterator["RegexElement"]:
for m in re.finditer(cls.regex, text):
start, stop = m.span()
# check if there were comments up to now
if re.search(cls.comment, text[:stop]) is None:
yield m.groups()
class Command(RegexElement):
prefix = "command"
regex = re.compile(r"\\(?:re)?newcommand(?:\*)?" \
+ r"\{([^@#\}\n]+)\}" \
+ r"(?:\[(\d+)\])?", re.MULTILINE)
def __init__(self, file, name, num_args):
super().__init__(file)
self.name = name
self.num_args = num_args
@classmethod
def match(cls, file, text: str) -> Iterator["Command"]:
for m in super().match(file, text):
cls.assert_len(m, 2)
yield Command(file, m[0], 0 if (m[1] is None) else m[1])
def __str__(self) -> str:
return f"{super().__str__()} {self.name} {self.num_args}"
class Environment(RegexElement):
prefix = "environ"
regex = re.compile(r"\\(?:re)?newenvironment(?:\*)?" \
+ r"\{([^@#\}\n]+)\}", re.MULTILINE)
# + r"(?:\[(\d+)\])?"
def __init__(self, file, name):
super().__init__(file)
self.name = name
@classmethod
def match(cls, file, text: str) -> Iterator["Environment"]:
for m in super().match(file, text):
cls.assert_len(m, 1)
yield Environment(file, m[0])
def __str__(self) -> str:
return f"{super().__str__()} {self.name}"
class Include(RegexElement):
prefix = "include"
@classmethod
def include(cls, file) -> Optional["Include"]:
if file not in included_files:
return cls(file)
def __init__(self, file):
super().__init__(file)
included_files.add(self.file)
def __str__(self) -> str:
return super().__str__()
class IncludeCls(Include):
regex = re.compile(r"\\documentclass" \
+ r"(?:\[[^@\]\n]+\])?" \
+ r"\{([^@#\}\n]+)\}", re.MULTILINE)
@classmethod
def match(cls, file, text: str) -> Iterator[Optional["IncludeCls"]]:
for m in super().match(file, text):
cls.assert_len(m, 1)
yield IncludeCls.include(m[0])
class IncludeSty(Include):
regex = re.compile(r"\\(?:RequirePackage|usepackage)" \
+ r"(?:\[[^@\]\n]+\])?" \
+ r"\{([^@#\}\n]+)\}", re.MULTILINE)
@classmethod
def match(cls, file, text: str) -> Iterator[Optional["IncludeSty"]]:
for m in super().match(file, text):
cls.assert_len(m, 1)
yield IncludeSty.include(m[0])
SCAN_PRIORITY_LIST = (IncludeCls, IncludeSty, Command, Environment,)
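# Illustrative input/output (not part of the scanner itself): scanning a
# hypothetical file notes.tex containing
#   \documentclass{article}
#   \usepackage{amsmath}
#   \newcommand{\foo}[2]{#1+#2}
# would write one line per match to OUT_IO, e.g.
#   include article
#   include amsmath
#   command notes \foo 2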
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~ FUNCTIONS ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def glob_files(in_files: List[File]) -> None:
for el in in_files:
# directly add IOs
if isinstance(el, TextIOBase):
files.put(el)
continue
# we have a string
for glob_el in glob.iglob(os.path.expanduser(el), recursive=True):
if glob_el.endswith(".log"):
# a latex log file, hopefully
try:
with open(glob_el, "r", encoding="utf-8") as file:
scan_log_file(file)
except UnicodeDecodeError as e:
err(f"Failed to decode file '{glob_el}': {e}")
elif any(glob_el.endswith(ext) for ext in LEGAL_FILE_EXTENSIONS):
# let file threads deal with extension
files.put(glob_el)
for _ in range(NUM_THREADS):
files.put(None)
def scan_file(io: TextIOBase) -> None:
text = io.read()
for cls in SCAN_PRIORITY_LIST:
for obj in cls.match(io.name, text):
if obj is not None:
messages.put(obj)
def scan_log_file(io: TextIOBase) -> None:
text = io.read()
for m in re.finditer(LOG_FILE_REGEX, text):
if m is not None and len(m.groups()) >= 1:
path, *_ = m.groups()
files.put(path)
def file_consumer() -> None:
while True:
try:
el = files.get(block=True, timeout=THREAD_TIMEOUT)
if el is None:
break
except Exception:
break
try:
# either pass TextIOBase instance or open a file
if isinstance(el, TextIOBase):
scan_file(el)
else:
if os.path.isfile(el):
with open(el, "r", encoding="utf-8") as file:
scan_file(file)
except UnicodeDecodeError as e:
err(f"Failed to decode file '{el}': {e}")
def message_consumer() -> None:
while True:
try:
el = messages.get(block=True, timeout=THREAD_TIMEOUT)
if el is None:
break
except Exception:
break
OUT_IO.write(f"{str(el)}\n")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~ UTILS ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def norm_class(path: str) -> str:
return os.path.basename(path).split(".")[0]
def err(msg: str, end: str="\n") -> None:
sys.stderr.write(f"{msg}{end}")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~ MAIN ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def scan_files(in_files: List[File]) -> None:
# start up glob thread
g_thread = threading.Thread(target=glob_files, args=(in_files,))
g_thread.start()
# start up file threads
f_threads = []
for _ in range(NUM_THREADS - 1):
f_thread = threading.Thread(target=file_consumer)
f_thread.start()
f_threads.append(f_thread)
# start up message thread
m_thread = threading.Thread(target=message_consumer)
m_thread.start()
# wait for glob thread
g_thread.join()
# stop file threads
for f_thread in f_threads:
f_thread.join()
# stop message thread
messages.put(None)
m_thread.join()
def main() -> None:
# args parsing
parser = argparse.ArgumentParser(description=DESC_STR)
parser.add_argument(
"files",
help="the files to scan. If left blank, will read from stdin instead",
nargs="*",
type=str,
)
args = parser.parse_args(sys.argv[1:])
# if no files are given, scan stdin
if len(args.files) < 1:
scan_files((sys.stdin,))
else:
scan_files(args.files)
if __name__ == "__main__":
main()
|
discordsocketthread.py
|
import asyncio
import threading
import typing
from functools import partial as func_partial
from legacy import discordsocket
class DiscordSocketThread:
def __init__(self, token: str = None, discord_socket: discordsocket.DiscordSocket = None,
shard_total: int = 1, shard_num: int = 0):
        # The event loop is needed in both branches below (and by the thread
        # started later), so create it before deciding how the socket is obtained.
        self.discord_socket_loop = asyncio.new_event_loop()
        if token is not None:
            # NOTE: check that the token string has the proper length, etc.?
            self.discord_socket = discordsocket.DiscordSocket(token, shard_total=shard_total, shard_num=shard_num,
                                                              event_loop=self.discord_socket_loop)
        elif discord_socket is not None:
            self.discord_socket = discord_socket
            self.discord_socket.event_loop = self.discord_socket_loop
else:
raise TypeError('Neither token nor socket was passed to socket.')
self.queue_to_hook_dict = {}
self.discord_socket_loop.create_task(self.discord_socket.init())
self.local_loop = asyncio.get_event_loop()
# TODO: Timeout on queues as they can lock the application
self.thread = threading.Thread(target=self.discord_socket_loop.run_forever)
self.thread.start()
'''
ready_queue = asyncio.Queue(maxsize=1, loop=local_loop)
self.queue_register(ready_queue, lambda x: x['t'] == 'READY')
guild_create_queue = asyncio.Queue(loop=local_loop)
self.queue_register(guild_create_queue, lambda x: x['t'] == 'GUILD_CREATE')
self.ready_payload = local_loop.run_until_complete(ready_queue.get())
self.queue_unregister(ready_queue)
self.guild_init_payloads = []
for i in range(len(self.ready_payload['d']['guilds'])):
self.guild_init_payloads.append(local_loop.run_until_complete(guild_create_queue.get()))
self.queue_unregister(guild_create_queue)
'''
# Event queues
self.main_queue = None
self.main_event_hook = None
self.listeners_count = 0
self.listeners = {}
self.event_dispatcher_task = None
def request_guild_members(self, guild_id: str, query: str = '', limit: int = 0):
return asyncio.run_coroutine_threadsafe(
self.discord_socket.request_guild_members(
guild_id, query, limit),
self.discord_socket_loop)
def status_update(self, status_type: str, is_afk: bool,
game: dict = None, since_time_seconds: int = None):
return asyncio.run_coroutine_threadsafe(
self.discord_socket.status_update(
status_type, is_afk, game, since_time_seconds),
self.discord_socket_loop)
def voice_state_update(self, guild_id: str, channel_id: int = None,
mute_self: bool = None, deaf_self: bool = False):
return asyncio.run_coroutine_threadsafe(
self.discord_socket.voice_state_update(
guild_id, channel_id, mute_self, deaf_self),
self.discord_socket_loop)
# region Events
def _main_queue_create(self) -> None:
self.main_queue = asyncio.Queue()
async def main_event_hook(payload: dict):
asyncio.run_coroutine_threadsafe(self.main_queue.put(payload), loop=self.local_loop)
self.main_event_hook = main_event_hook
self.discord_socket_loop.call_soon_threadsafe(func_partial
(self.discord_socket.event_hooks.append, self.main_event_hook))
self.event_dispatcher_task = self.local_loop.create_task(self.event_dispatcher())
def _main_queue_delete(self) -> None:
self.event_dispatcher_task.cancel()
self.discord_socket_loop.call_soon_threadsafe(func_partial(
self.discord_socket.event_hooks.remove_async, self.main_event_hook))
self.main_queue = None
async def event_dispatcher(self) -> None:
while True:
payload = await self.main_queue.get()
for l in self.listeners.values():
asyncio.ensure_future(l(payload['t'], payload['d']), loop=self.local_loop)
def event_queue_create(self, event_name_qualifier: str = '') -> 'DiscordEventQueue':
if self.listeners_count == 0:
self._main_queue_create()
return self.DiscordEventQueue(self, event_name_qualifier)
class DiscordEventQueue(asyncio.Queue):
def __init__(self, parent: 'DiscordSocketThread', event_name: str):
super().__init__(loop=parent.local_loop)
self.parent = parent
self.event_name = event_name
def __enter__(self) -> None:
async def queue_hook(event_name: str, event_data: dict):
if event_name == self.event_name:
await self.put(event_data)
self.parent.listeners[self] = queue_hook
self.parent.listeners_count += 1
def __exit__(self, exception_type, exception_value, traceback) -> None:
self.parent.listeners.pop(self)
self.parent.listeners_count -= 1
if self.parent.listeners_count == 0:
self.parent._main_queue_delete()
def event_event_create(self, condition_func: typing.Callable, event_name: str):
if self.listeners_count == 0:
self._main_queue_create()
return self.DiscordEventEvent(self, condition_func, event_name)
class DiscordEventEvent(asyncio.Event):
def __init__(self, parent: 'DiscordSocketThread', condition_func: typing.Callable, event_name: str):
super().__init__()
self.parent = parent
self.event_name = event_name
self.condition_func = condition_func
async def __call__(self) -> None:
async def event_hook(event_name: str, event_data: dict):
if event_name == self.event_name:
try:
if self.condition_func(event_data):
self.set()
except KeyError:
pass
self.parent.listeners[self] = event_hook
self.parent.listeners_count += 1
await self.wait()
self.parent.listeners.pop(self)
self.parent.listeners_count -= 1
if self.parent.listeners_count == 0:
self.parent._main_queue_delete()
# endregion
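# Illustrative use of the event helpers above (a sketch; the variable names
# and the idea of driving local_loop with run_until_complete are assumptions,
# and no real token is shown):
#   socket_thread = DiscordSocketThread(token='...')
#   message_queue = socket_thread.event_queue_create('MESSAGE_CREATE')
#   with message_queue:
#       payload = socket_thread.local_loop.run_until_complete(message_queue.get())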
|
util.py
|
# -*- coding: utf-8 -*-
"""
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import inspect
import json
import logging
import os
import re
import sys
import traceback
from platform import architecture
from threading import Thread
from uuid import uuid4
from .js import api, npo, dom, event
_token = uuid4().hex
default_html = '<!doctype html><html><head></head><body></body></html>'
logger = logging.getLogger('pywebview')
class WebViewException(Exception):
pass
def base_uri(relative_path=''):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
if 'pytest' in sys.modules:
for arg in reversed(sys.argv):
path = os.path.realpath(arg.split('::')[0])
if os.path.exists(path):
base_path = path if os.path.isdir(path) else os.path.dirname(path)
break
else:
base_path = os.path.dirname(os.path.realpath(sys.argv[0]))
if not os.path.exists(base_path):
raise ValueError('Path %s does not exist' % base_path)
return 'file://%s' % os.path.join(base_path, relative_path)
def convert_string(string):
if sys.version < '3':
return unicode(string)
else:
return str(string)
def parse_file_type(file_type):
'''
:param file_type: file type string 'description (*.file_extension1;*.file_extension2)' as required by file filter in create_file_dialog
:return: (description, file extensions) tuple
'''
valid_file_filter = r'^([\w ]+)\((\*(?:\.(?:\w+|\*))*(?:;\*\.\w+)*)\)$'
match = re.search(valid_file_filter, file_type)
if match:
return match.group(1).rstrip(), match.group(2)
else:
raise ValueError('{0} is not a valid file filter'.format(file_type))
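# Illustrative examples for the filter format accepted above:
#   parse_file_type('Image Files (*.png;*.jpg)')  ->  ('Image Files', '*.png;*.jpg')
#   parse_file_type('bogus')                      ->  raises ValueError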
def parse_api_js(window, platform, uid=''):
def get_args(f):
return list(inspect.getfullargspec(f).args)
def generate_func():
if window._js_api:
functions = { name: get_args(getattr(window._js_api, name))[1:] for name in dir(window._js_api) if callable(getattr(window._js_api, name)) and not name.startswith('_')}
else:
functions = {}
if len(window._functions) > 0:
expose_functions = { name: get_args(f) for name, f in window._functions.items()}
else:
expose_functions = {}
functions.update(expose_functions)
functions = functions.items()
return [ {'func': name, 'params': params} for name, params in functions ]
    func_list = []
    try:
        func_list = generate_func()
    except Exception as e:
        logger.exception(e)
js_code = npo.src + event.src + api.src % (_token, platform, uid, func_list) + dom.src
return js_code
def js_bridge_call(window, func_name, param, value_id):
def _call():
try:
result = func(*func_params.values())
result = json.dumps(result).replace('\\', '\\\\').replace('\'', '\\\'')
code = 'window.pywebview._returnValues["{0}"]["{1}"] = {{value: \'{2}\'}}'.format(func_name, value_id, result)
except Exception as e:
error = {
'message': str(e),
'name': type(e).__name__,
'stack': traceback.format_exc()
}
result = json.dumps(error).replace('\\', '\\\\').replace('\'', '\\\'')
code = 'window.pywebview._returnValues["{0}"]["{1}"] = {{isError: true, value: \'{2}\'}}'.format(func_name, value_id, result)
window.evaluate_js(code)
func = window._functions.get(func_name) or getattr(window._js_api, func_name, None)
if func is not None:
try:
func_params = param if not param else json.loads(param)
t = Thread(target=_call)
t.start()
except Exception:
logger.exception('Error occurred while evaluating function {0}'.format(func_name))
else:
logger.error('Function {}() does not exist'.format(func_name))
def escape_string(string):
return string\
.replace('\\', '\\\\') \
.replace('"', r'\"') \
.replace('\n', r'\n')\
.replace('\r', r'\r')
def transform_url(url):
if url and '://' not in url:
return base_uri(url)
else:
return url
def make_unicode(string):
"""
Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned
unchanged
:param string: input string
:return: Unicode string
"""
if sys.version < '3' and isinstance(string, str):
return unicode(string.decode('utf-8'))
return string
def escape_line_breaks(string):
return string.replace('\\n', '\\\\n').replace('\\r', '\\\\r')
def inject_base_uri(content, base_uri):
pattern = r'<%s(?:[\s]+[^>]*|)>'
base_tag = '<base href="%s">' % base_uri
match = re.search(pattern % 'base', content)
if match:
return content
match = re.search(pattern % 'head', content)
if match:
tag = match.group()
return content.replace(tag, tag + base_tag)
match = re.search(pattern % 'html', content)
if match:
tag = match.group()
return content.replace(tag, tag + base_tag)
match = re.search(pattern % 'body', content)
if match:
tag = match.group()
return content.replace(tag, base_tag + tag)
return base_tag + content
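# Illustrative behaviour of inject_base_uri (a sketch, not a test from the
# project): the <base> tag is inserted right after <head> when present,
# otherwise after <html>, otherwise before <body>; an existing <base> tag wins.
#   inject_base_uri('<html><head></head><body></body></html>', 'file:///tmp/')
#   ->  '<html><head><base href="file:///tmp/"></head><body></body></html>'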
def interop_dll_path(dll_name):
if dll_name == 'WebBrowserInterop.dll':
dll_name = 'WebBrowserInterop.x64.dll' if architecture()[0] == '64bit' else 'WebBrowserInterop.x86.dll'
# Unfrozen path
dll_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib', dll_name)
if os.path.exists(dll_path):
return dll_path
# Frozen path, dll in the same dir as the executable
dll_path = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), dll_name)
if os.path.exists(dll_path):
return dll_path
try:
# Frozen path packed as onefile
dll_path = os.path.join(sys._MEIPASS, dll_name)
if os.path.exists(dll_path):
return dll_path
except Exception:
pass
raise Exception('Cannot find %s' % dll_name)
|
mp_video.py
|
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Multiprocessing video with queue
import multiprocessing as mp
import os
import cv2
import time
import ctypes
import threading
from detect_ap2 import det_preprocess, det_postprocess
from vai.dpuv1.rt import xdnn
import numpy as np
##################################################
# CONSTANTS
##################################################
num_shared_slots = 200
# Current version does copies...
# Assumes all types are np.float32/ctypes.c_float
class SharedMemoryQueue:
def __init__(self, name, length, buf_shapes_list):
print "Creating SharedMemoryQueue",name
self._name = name
self._len = length
# Hard coded for floats...
self._mem_type = ctypes.c_float
self._np_type = np.float32
# put() function copies into the free list
self._freeList = mp.Queue(length)
# get() function gets id of open slot. consumer needs to confirm when data is read
self._readList = mp.Queue(length)
self._buf_shapes_list = buf_shapes_list
self._buf_sizes_list = map(lambda x: np.prod(x), buf_shapes_list)
print "Creating Shared Memory with buf_shape_list=",self._buf_shapes_list
self._shared_memory_arrs = list()
for i in range(length):
buf_list = list()
for buf_size in self._buf_sizes_list:
buf_list.append(mp.Array(self._mem_type, buf_size))
self._shared_memory_arrs.append(buf_list)
self._freeList.put(i)
def close(self):
self._readList.put(None)
def accessBuffer(self, slot_id):
return self._shared_memory_arrs[slot_id]
def accessNumpyBuffer(self, slot_id):
buf_list = list()
for i in range(len(self._buf_shapes_list)):
np_arr = np.frombuffer(self._shared_memory_arrs[slot_id][i].get_obj(), dtype = self._np_type)
np_arr = np.reshape(np_arr, self._buf_shapes_list[i], order = 'C')
buf_list.append(np_arr)
return buf_list
def openWriteId(self):
id = self._freeList.get()
return id
def closeWriteId(self, id):
# finished writing slot id
self._readList.put(id)
def openReadId(self):
id = self._readList.get()
return id
def closeReadId(self, id):
# finished reading slot id
self._freeList.put(id)
def dump(self):
for i in range(self._len):
buf_list = self.accessNumpyBuffer(i)
for j, np_arr in enumerate(buf_list):
print "Slot=",i,"Array=",j,"Val=",np_arr
import inspect
def funcname():
return inspect.stack()[1][0].f_code.co_name
def cam_loop(shared_frame_arrs, ready_fpga):
cap = cv2.VideoCapture('Pedestrians.mp4')
# First read frames into a list
frames = []
while cap.isOpened():
s, frame = cap.read()
if s:
frame = cv2.resize(frame, (320, 320), interpolation = cv2.INTER_LINEAR)
frames.append(frame)
else:
break
ready_fpga.get()
# Then send frames to shared memory as fast as possible for measuring pipeline performance
print funcname(),"Putting Frames..."
start_time = time.time()
frame_cnt = 0
for f in frames:
write_slot = shared_frame_arrs.openWriteId()
np_arr = shared_frame_arrs.accessNumpyBuffer(write_slot)
np_arr[0][:] = f
shared_frame_arrs.closeWriteId(write_slot)
frame_cnt += 1
end_time = time.time()
shared_frame_arrs.close()
print funcname(),"Video Ending! Frame Count = ",frame_cnt
print('{0} cam loading time: {1} seconds'.format(funcname(),end_time - start_time))
def detect_pre(shared_frame_arrs, shared_trans_arrs):
start_time = None
frame_id = 0
while True:
read_slot = shared_frame_arrs.openReadId()
if start_time is None:
start_time = time.time()
if read_slot is None:
break
read_arrs = shared_frame_arrs.accessNumpyBuffer(read_slot)
frame_id += 1
write_slot = shared_trans_arrs.openWriteId()
write_arrs = shared_trans_arrs.accessNumpyBuffer(write_slot)
det_preprocess(read_arrs[0], write_arrs[1])
shared_frame_arrs.closeReadId(read_slot)
shared_trans_arrs.closeWriteId(write_slot)
end_time = time.time()
shared_trans_arrs.close()
print "Ran",frame_id,"frames"
print('detect_preprocessing time: {0} seconds'.format(end_time - start_time))
import run_fpga
# helper thread just to read output from XFDNN and pass onto appropriate queue
def fpga_wait( fpgaRT, qWait, shared_output_arrs):
numProcessed = 0
frame_id = 0
while True:
write_slot = qWait.get()
if write_slot is None:
break
fpgaRT.get_result(write_slot)
write_slot_arrs = shared_output_arrs.accessNumpyBuffer(write_slot)
shared_output_arrs.closeWriteId(write_slot)
frame_id += 1
def detect_forward(shared_trans_arrs, shared_output_arrs, ready_fpga):
VAI_ALVEO_ROOT = os.getenv("VAI_ALVEO_ROOT","/opt/ml-suite")
MLSUITE_PLATFORM = os.getenv("MLSUITE_PLATFORM","alveo-u200")
param_str = "{\'batch_sz\': 1," +\
"\'outtrainproto\': None," +\
"\'input_names\': [u\'data\']," +\
"\'cutAfter\': \'data\'," +\
"\'outproto\': \'xfdnn_deploy.prototxt\'," +\
"\'xdnnv3\': True," +\
"\'inproto\': \'deploy.prototxt\'," +\
"\'profile\': False," +\
"\'trainproto\': None," +\
"\'weights\': \'deploy.caffemodel_data.h5\'," +\
"\'netcfg\': \'deploy.compiler.json\'," +\
"\'quantizecfg\': \'deploy.compiler_quant.json\'," +\
"\'xclbin\': \'" + VAI_ALVEO_ROOT + "/overlaybins/" + MLSUITE_PLATFORM + "/overlay_4.xclbin\'," +\
"\'output_names\': [u\'pixel-conv\', u\'bb-output\']," +\
"\'overlaycfg\': {u\'XDNN_NUM_KERNELS\': u\'2\', u\'SDX_VERSION\': u\'2018.2\', u\'XDNN_VERSION_MINOR\': u\'0\', u\'XDNN_SLR_IDX\': u\'1, 1\', u\'XDNN_DDR_BANK\': u\'0, 3\', u\'XDNN_CSR_BASE\': u\'0x1800000, 0x1810000\', u\'XDNN_BITWIDTH\': u\'8\', u\'DSA_VERSION\': u\'xilinx_u200_xdma_201820_1\', u\'XDNN_VERSION_MAJOR\': u\'3\'}}"
det = run_fpga.RunFPGA(param_str)
ready_fpga.put(1)
qWait = mp.Queue(maxsize=100)
t = threading.Thread(target=fpga_wait, args=(det._fpgaRT, qWait, shared_output_arrs))
t.start()
frame_id = 0
start_time = None
while True:
read_slot = shared_trans_arrs.openReadId()
if start_time is None:
start_time = time.time()
if read_slot is None:
break
read_slot_arrs = shared_trans_arrs.accessNumpyBuffer(read_slot)
write_slot = shared_output_arrs.openWriteId()
write_slot_arrs = shared_output_arrs.accessNumpyBuffer(write_slot)
out_dict = det.forward_async(read_slot_arrs[1:], write_slot_arrs[1:], write_slot)
shared_trans_arrs.closeReadId(read_slot)
qWait.put(write_slot)
frame_id += 1
end_time = time.time()
shared_output_arrs.close()
print('detect forward time: {0} seconds'.format(end_time - start_time))
qWait.put(None)
t.join()
def detect_post(shared_output_arrs, face_q):
start_time = None
frame_cnt = 0
while True:
read_slot = shared_output_arrs.openReadId()
if start_time is None:
start_time = time.time()
if read_slot is None:
break
read_slot_arrs = shared_output_arrs.accessNumpyBuffer(read_slot)
face_rects = det_postprocess(read_slot_arrs[1], read_slot_arrs[2], [320,320,3])
shared_output_arrs.closeReadId(read_slot)
frame_cnt += 1
face_q.put(face_rects)
end_time = time.time()
face_q.put(None)
print('detect post processing time: {0} seconds'.format(end_time - start_time))
total_time = end_time-start_time
print('Total run: {0} frames in {1} seconds ({2} fps)'.format(frame_cnt, total_time, frame_cnt/total_time))
def show_loop(face_q):
cv2.namedWindow('face_detection')
frame_id = 0
cap = cv2.VideoCapture('Pedestrians.mp4')
start_time = None
while cap.isOpened():
s, frame = cap.read()
if s:
frame = cv2.resize(frame, (320, 320), interpolation = cv2.INTER_LINEAR)
else:
break
face_rects = face_q.get()
if face_rects is None:
break
if start_time is None:
start_time = time.time()
# Show every modulo frame for performance reasons...
if frame_id % 1 == 0:
for face_rect in face_rects:
cv2.rectangle(frame,(face_rect[0],face_rect[1]),(face_rect[2],face_rect[3]),(0,255,0),2)
cv2.imshow('face_detection', frame)
cv2.waitKey(40)
frame_id += 1
end_time = time.time()
print('drawing boxes time: {0} seconds'.format(end_time - start_time))
if __name__ == '__main__':
frame_q = mp.Queue()
resize_q = mp.Queue()
trans_q = mp.Queue()
output_q = mp.Queue()
face_q = mp.Queue()
ready_fpga = mp.Queue()
sharedInputArrs = []
compilerJSONObj = xdnn.CompilerJsonParser('deploy.compiler.json')
input_shapes = map(lambda x: tuple(x), compilerJSONObj.getInputs().itervalues())
output_shapes = map(lambda x: tuple(x), compilerJSONObj.getOutputs().itervalues())
input_sizes = map(lambda x: np.prod(x), input_shapes)
output_sizes = map(lambda x: np.prod(x), output_shapes)
print input_shapes
print output_shapes
# shared memory from video capture to preprocessing
shared_frame_arrs = SharedMemoryQueue("frame",num_shared_slots, [(320,320,3)])
# shared memory from preprocessing to fpga forward
shared_trans_arrs = SharedMemoryQueue("trans",num_shared_slots, [(320,320,3)]+input_shapes)
# shared memory from fpga forward to postprocessing
shared_output_arrs = SharedMemoryQueue("output",num_shared_slots, [(320,320,3)]+output_shapes)
# shared memory from postprocessing to display
shared_display_arrs = SharedMemoryQueue("display",num_shared_slots, [320*320*3])
cam_process = mp.Process(target=cam_loop,args=(shared_frame_arrs,ready_fpga, ))
detect_process1 = mp.Process(target=detect_pre,args=(shared_frame_arrs,shared_trans_arrs, ))
detect_process2 = mp.Process(target=detect_forward,args=(shared_trans_arrs, shared_output_arrs, ready_fpga, ))
detect_process3 = mp.Process(target=detect_post,args=(shared_output_arrs, face_q, ))
show_process = mp.Process(target=show_loop,args=(face_q, ))
start_time = time.time()
cam_process.start()
detect_process1.start()
detect_process2.start()
detect_process3.start()
show_process.start()
# Waits for cam_process to finish video...
show_process.join()
end_time = time.time()
print('total process time: {0} seconds'.format(end_time - start_time))
# Now kill remaining processes...
cam_process.terminate()
detect_process1.terminate()
detect_process2.terminate()
detect_process3.terminate()
show_process.terminate()
|
storage.py
|
#
# Copyright (c) 2019-2021, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
#
from flask import Flask, request, jsonify
import json, tempfile, os
import urllib
import datetime
import async_task
import threading
# logging handler
from logging.handlers import TimedRotatingFileHandler
# common functions
from cscs_api_common import check_auth_header, get_username
from cscs_api_common import create_task, update_task, get_task_status
from cscs_api_common import exec_remote_command
from cscs_api_common import create_certificate
from cscs_api_common import in_str
from cscs_api_common import is_valid_file, is_valid_dir, check_command_error, get_boolean_var
# job_time_checker for correct SLURM job time in /xfer-internal tasks
import job_time
# for debug purposes
import logging
import requests
from hashlib import md5
import stat
from cryptography.fernet import Fernet
import time
## READING environment vars
CERTIFICATOR_URL = os.environ.get("F7T_CERTIFICATOR_URL")
TASKS_URL = os.environ.get("F7T_TASKS_URL")
COMPUTE_URL = os.environ.get("F7T_COMPUTE_URL")
KONG_URL = os.environ.get("F7T_KONG_URL")
STORAGE_PORT = os.environ.get("F7T_STORAGE_PORT", 5000)
AUTH_HEADER_NAME = 'Authorization'
# Machines for Storage:
# Filesystem DNS or IP where to download or upload files:
SYSTEMS_INTERNAL_STORAGE = os.environ.get("F7T_SYSTEMS_INTERNAL_STORAGE").strip('\'"')
# Job machine where to send xfer-internal jobs (must be defined in SYSTEMS_PUBLIC)
STORAGE_JOBS_MACHINE = os.environ.get("F7T_STORAGE_JOBS_MACHINE").strip('\'"')
# SYSTEMS_PUBLIC: list of allowed systems
# remove quotes and split into array
SYSTEMS_PUBLIC = os.environ.get("F7T_SYSTEMS_PUBLIC").strip('\'"').split(";")
# internal machines to submit/query jobs
SYS_INTERNALS = os.environ.get("F7T_SYSTEMS_INTERNAL_COMPUTE").strip('\'"').split(";")
# internal machines for small operations
SYS_INTERNALS_UTILITIES = os.environ.get("F7T_SYSTEMS_INTERNAL_UTILITIES").strip('\'"').split(";")
###### ENV VAR TO DETECT THE TECHNOLOGY OF THE STAGING AREA:
OBJECT_STORAGE = os.environ.get("F7T_OBJECT_STORAGE", "").strip('\'"')
# Scheduler partition used for internal transfers
XFER_PARTITION = os.environ.get("F7T_XFER_PARTITION", "").strip('\'"')
# --account parameter needed in sbatch?
USE_SLURM_ACCOUNT = get_boolean_var(os.environ.get("F7T_USE_SLURM_ACCOUNT", False))
# Machine used for external transfers
EXT_TRANSFER_MACHINE_PUBLIC=os.environ.get("F7T_EXT_TRANSFER_MACHINE_PUBLIC", "").strip('\'"')
EXT_TRANSFER_MACHINE_INTERNAL=os.environ.get("F7T_EXT_TRANSFER_MACHINE_INTERNAL", "").strip('\'"')
OS_AUTH_URL = os.environ.get("F7T_OS_AUTH_URL")
OS_IDENTITY_PROVIDER = os.environ.get("F7T_OS_IDENTITY_PROVIDER")
OS_IDENTITY_PROVIDER_URL= os.environ.get("F7T_OS_IDENTITY_PROVIDER_URL")
OS_PROTOCOL = os.environ.get("F7T_OS_PROTOCOL")
OS_INTERFACE = os.environ.get("F7T_OS_INTERFACE")
OS_PROJECT_ID = os.environ.get("F7T_OS_PROJECT_ID")
# SECRET KEY for temp url without using Token
SECRET_KEY = os.environ.get("F7T_SECRET_KEY")
# Expiration time for temp URLs in seconds, by default 30 days
STORAGE_TEMPURL_EXP_TIME = int(os.environ.get("F7T_STORAGE_TEMPURL_EXP_TIME", "2592000").strip('\'"'))
# max file size for temp URLs in MegaBytes, by default 5120 MB = 5 GB
STORAGE_MAX_FILE_SIZE = int(os.environ.get("F7T_STORAGE_MAX_FILE_SIZE", "5120").strip('\'"'))
# for use on signature of URL it must be in bytes (MB*1024*1024 = Bytes)
STORAGE_MAX_FILE_SIZE *= 1024*1024
UTILITIES_TIMEOUT = int(os.environ.get("F7T_UTILITIES_TIMEOUT", "5").strip('\'"'))
STORAGE_POLLING_INTERVAL = int(os.environ.get("F7T_STORAGE_POLLING_INTERVAL", "60").strip('\'"'))
CERT_CIPHER_KEY = os.environ.get("F7T_CERT_CIPHER_KEY", "").strip('\'"').encode('utf-8')
### SSL parameters
USE_SSL = get_boolean_var(os.environ.get("F7T_USE_SSL", False))
SSL_CRT = os.environ.get("F7T_SSL_CRT", "")
SSL_KEY = os.environ.get("F7T_SSL_KEY", "")
# verify signed SSL certificates
SSL_SIGNED = get_boolean_var(os.environ.get("F7T_SSL_SIGNED", False))
# asynchronous tasks: upload & download --> http://TASKS_URL
# {task_id : AsyncTask}
storage_tasks = {}
# relationship between upload task and filesystem
# {hash_id : {'user':user,'system':system,'target':path,'source':fileName,'status':status_code,'hash_id':task_id}}
uploaded_files = {}
# debug on console
debug = get_boolean_var(os.environ.get("F7T_DEBUG_MODE", False))
app = Flask(__name__)
def file_to_str(fileName):
str_file = ""
try:
fileObj = open(fileName,"r")
str_file = fileObj.read()
fileObj.close()
return str_file
except IOError as e:
app.logger.error(e)
return ""
def str_to_file(str_file,dir_name,file_name):
try:
if not os.path.exists(dir_name):
app.logger.info(f"Created temp directory for certs in {dir_name}")
os.makedirs(dir_name)
file_str = open(f"{dir_name}/{file_name}","w")
file_str.write(str_file)
file_str.close()
app.logger.info(f"File written in {dir_name}/{file_name}")
except IOError as e:
app.logger.error("Couldn't write file {dir_name}/{file_name}")
app.logger.error(e)
def os_to_fs(task_id):
upl_file = uploaded_files[task_id]
system_name = upl_file["system_name"]
system_addr = upl_file["system_addr"]
username = upl_file["user"]
objectname = upl_file["source"]
try:
app.logger.info(upl_file["msg"])
action = upl_file["msg"]["action"]
# certificate is encrypted with CERT_CIPHER_KEY key
# here is decrypted
cert = upl_file["msg"]["cert"]
cipher = Fernet(CERT_CIPHER_KEY)
# the decryption process produces a byte type
# remember that is stored as str not as byte in the JSON
pub_cert = cipher.decrypt(cert[0].encode('utf-8')).decode('utf-8')
# cert_pub in 0 /user-key-cert.pub
# temp-dir in 1
# get tmp directory
td = cert[1]
app.logger.info(f"Temp dir: {td}")
if not os.path.exists(td):
# retrieve public certificate and store in temp dir location
str_to_file(pub_cert,td,"user-key-cert.pub")
# user public and private key should be in Storage / path, symlinking in order to not use the same key at the same time
os.symlink(os.getcwd() + "/user-key.pub", td + "/user-key.pub") # link on temp dir
os.symlink(os.getcwd() + "/user-key", td + "/user-key") # link on temp dir
# stat.S_IRUSR -> owner has read permission
os.chmod(td + "/user-key-cert.pub", stat.S_IRUSR)
cert_list = [f"{td}/user-key-cert.pub", f"{td}/user-key.pub", f"{td}/user-key", td]
# start download from OS to FS
update_task(task_id,None,async_task.ST_DWN_BEG)
# execute download
result = exec_remote_command(username, system_name, system_addr, "", "storage_cert", cert_list)
# if no error, then download is complete
if result["error"] == 0:
update_task(task_id, None, async_task.ST_DWN_END)
# No need to delete the dictionary, it will be cleaned on next iteration
# delete upload request
# del uploaded_files[task_id]
# must be deleted after object is moved to storage
# staging.delete_object(containername=username,prefix=task_id,objectname=objectname)
# for big files delete_object consumes a long time and often gives a TimeOut error between system and staging area
# Therefore, using delete_object_after a few minutes (in this case 5 minutes) will trigger internal staging area
# mechanism to delete the file automatically and without a need of a connection
staging.delete_object_after(containername=username,prefix=task_id,objectname=objectname, ttl = int(time.time())+600)
# if error, should be prepared for try again
else:
# app.logger.error(result["msg"])
upl_file["status"] = async_task.ST_DWN_ERR
uploaded_files[task_id] = upl_file
# update but conserve "msg" as the data for the download to OS, to be used for retry in the next iteration
update_task(task_id,None, async_task.ST_DWN_ERR, msg = upl_file, is_json = True)
except Exception as e:
app.logger.error(e)
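# Hedged illustration (not part of the service): the certificate handling above relies on a
# symmetric Fernet round trip, roughly:
#
#   from cryptography.fernet import Fernet
#   key = Fernet.generate_key()            # here the key comes from F7T_CERT_CIPHER_KEY
#   cipher = Fernet(key)
#   token = cipher.encrypt(b"ssh cert ...").decode('utf-8')   # stored as str in the task JSON
#   cert = cipher.decrypt(token.encode('utf-8')).decode('utf-8')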
# asynchronous check of uploaded_files to determine which files are ready to download to the FS
def check_upload_files():
global staging
while True:
# Get updated task status from Tasks microservice DB backend (TaskPersistence)
get_upload_unfinished_tasks()
# Timestamp for logs
timestamp = time.asctime( time.localtime(time.time()) )
app.logger.info(f"Check files in Object Storage {timestamp}")
app.logger.info(f"Pendings uploads: {len(uploaded_files)}")
# create STATIC auxiliary upload list in order to avoid "RuntimeError: dictionary changed size during iteration"
# (this occurs because the uploaded_files dictionary is shared between threads and in Python 3 dict.items() triggers that error)
upl_list= [(task_id, upload) for task_id,upload in uploaded_files.items()]
for task_id,upload in upl_list:
#checks if file is ready or not for download to FileSystem
try:
task_status = async_task.status_codes[upload['status']]
app.logger.info(f"Status of {task_id}: {task_status}")
#if upload["status"] in [async_task.ST_URL_REC,async_task.ST_DWN_ERR] :
if upload["status"] == async_task.ST_URL_REC:
app.logger.info(f"Task {task_id} -> File ready to upload or already downloaded")
upl = uploaded_files[task_id]
# app.logger.info(upl)
containername = upl["user"]
prefix = task_id
objectname = upl["source"]
if not staging.is_object_created(containername,prefix,objectname):
app.logger.info(f"{containername}/{prefix}/{objectname} isn't created in staging area, continue polling")
continue
# confirms that file is in OS (auth_header is not needed)
update_task(task_id, None, async_task.ST_UPL_CFM, msg = upload, is_json = True)
upload["status"] = async_task.ST_UPL_CFM
uploaded_files["task_id"] = upload
os_to_fs_task = threading.Thread(target=os_to_fs,args=(task_id,))
os_to_fs_task.start()
# if the upload to OS is done but the download to FS failed, then resume
elif upload["status"] == async_task.ST_DWN_ERR:
upl = uploaded_files[task_id]
containername = upl["user"]
prefix = task_id
objectname = upl["source"]
# if file has been deleted from OS, then erroneous upload process. Restart.
if not staging.is_object_created(containername,prefix,objectname):
app.logger.info(f"{containername}/{prefix}/{objectname} isn't created in staging area, task marked as erroneous")
update_task(task_id, None ,async_task.ERROR, "File was deleted from staging area. Start a new upload process")
upload["status"] = async_task.ERROR
continue
# if file is still in OS, proceed to new download to FS
update_task(task_id, None, async_task.ST_DWN_BEG)
upload["status"] = async_task.ST_DWN_BEG
uploaded_files["task_id"] = upload
os_to_fs_task = threading.Thread(target=os_to_fs,args=(task_id,))
os_to_fs_task.start()
except Exception as e:
app.logger.error(type(e), e)
continue
time.sleep(STORAGE_POLLING_INTERVAL)
# async task for download large files
# user: user in the posix file system
# system: system in which the file will be stored (REMOVE later)
# sourcePath: path in FS where the object is
# task_id: async task id given for Tasks microservice
def download_task(auth_header,system_name, system_addr,sourcePath,task_id):
object_name = sourcePath.split("/")[-1]
global staging
# check if staging area token is valid
if not staging.is_token_valid():
if not staging.authenticate():
msg = "Staging area auth error"
update_task(task_id, auth_header, async_task.ERROR, msg)
return
# create container if it doesn't exist:
container_name = get_username(auth_header)
if not staging.is_container_created(container_name):
errno = staging.create_container(container_name)
if errno == -1:
msg="Could not create container {container_name} in Staging Area ({staging_name})".format(container_name=container_name, staging_name=staging.get_object_storage())
update_task(task_id, auth_header, async_task.ERROR, msg)
return
# upload file to swift
object_prefix = task_id
upload_url = staging.create_upload_form(sourcePath, container_name, object_prefix, STORAGE_TEMPURL_EXP_TIME, STORAGE_MAX_FILE_SIZE)
# advice Tasks that upload begins:
update_task(task_id, auth_header, async_task.ST_UPL_BEG)
# upload starts:
res = exec_remote_command(auth_header,system_name, system_addr,upload_url["command"])
# if upload to SWIFT fails:
if res["error"] != 0:
msg = "Upload to Staging area has failed. Object: {object_name}".format(object_name=object_name)
error_str = res["msg"]
if in_str(error_str,"OPENSSH"):
error_str = "User does not have permissions to access machine"
msg = f"{msg}. {error_str}"
app.logger.error(msg)
update_task(task_id, auth_header,async_task.ST_UPL_ERR, msg)
return
# get Download Temp URL with [seconds] time expiration
# create temp url for file: valid for STORAGE_TEMPURL_EXP_TIME seconds
temp_url = staging.create_temp_url(container_name, object_prefix, object_name, STORAGE_TEMPURL_EXP_TIME)
# if error raises in temp url creation:
if temp_url is None:
msg = "Temp URL creation failed. Object: {object_name}".format(object_name=object_name)
update_task(task_id, auth_header, async_task.ERROR, msg)
return
# if successfully created: temp_url in task with success status
update_task(task_id, auth_header, async_task.ST_UPL_END, temp_url)
# marked deletion from here to STORAGE_TEMPURL_EXP_TIME (default 30 days)
retval = staging.delete_object_after(containername=container_name,prefix=object_prefix,objectname=object_name,ttl=int(time.time()) + STORAGE_TEMPURL_EXP_TIME)
if retval == 0:
app.logger.info("Setting {seconds} [s] as X-Delete-At".format(seconds=STORAGE_TEMPURL_EXP_TIME))
else:
app.logger.error("Object couldn't be marked as X-Delete-At")
# download large file, returns temp url for downloading
@app.route("/xfer-external/download", methods=["POST"])
@check_auth_header
def download_request():
auth_header = request.headers[AUTH_HEADER_NAME]
system_addr = EXT_TRANSFER_MACHINE_INTERNAL
system_name = EXT_TRANSFER_MACHINE_PUBLIC
sourcePath = request.form["sourcePath"] # path file in cluster
if sourcePath == None or sourcePath == "":
data = jsonify(error="Source path not set in request")
return data, 400
# checks if sourcePath is a valid path
check = is_valid_file(sourcePath, auth_header, system_name, system_addr)
if not check["result"]:
return jsonify(description="sourcePath error"), 400, check["headers"]
# obtain new task from Tasks microservice
task_id = create_task(auth_header, service="storage")
# couldn't create task
if task_id == -1:
data = jsonify(error="Couldn't create task")
return data, 400
# asynchronous task creation
aTask = threading.Thread(target=download_task,
args=(auth_header, system_name, system_addr, sourcePath, task_id))
storage_tasks[task_id] = aTask
try:
update_task(task_id, auth_header, async_task.QUEUED)
storage_tasks[task_id].start()
task_url = "{kong_url}/tasks/{task_id}".format(kong_url=KONG_URL, task_id=task_id)
data = jsonify(success="Task created", task_url=task_url, task_id=task_id)
return data, 201
except Exception as e:
data = jsonify(error=e)
return data, 400
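# Hedged usage sketch (host, gateway prefix and token are placeholders, not defined here):
#
#   curl -X POST "https://<kong_url>/storage/xfer-external/download" \
#        -H "Authorization: Bearer <token>" \
#        -F "sourcePath=/scratch/<user>/archive.tar"
#
# returns 201 with {"success": "Task created", "task_id": ..., "task_url": ...}; polling the
# task URL eventually yields the temporary download URL produced by download_task above.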
# invalidate temp URLs
# parameters:
# - X-Task-Id: task id of the transfer related to the URL that wants to be invalidated
@app.route("/xfer-external/invalidate", methods=["POST"])
@check_auth_header
def invalidate_request():
try:
task_id = request.headers["X-Task-Id"]
except KeyError as e:
return jsonify(error="Header X-Task-Id missing"), 400
auth_header = request.headers[AUTH_HEADER_NAME]
# search if task belongs to the user
task_status = get_task_status(task_id, auth_header)
if task_status == -1:
return jsonify(error="Invalid X-Task-Id"), 400
containername = get_username(auth_header)
prefix = task_id
objects = staging.list_objects(containername,prefix)
for objectname in objects:
# error = staging.delete_object(containername,prefix,objectname)
# replacing delete_object by delete_object_after 5 minutes
error = staging.delete_object_after(containername=containername, prefix=prefix, objectname=objectname, ttl=int(time.time())+600)
if error == -1:
return jsonify(error="Could not invalidate URL"), 400
return jsonify(success="URL invalidated successfully"), 201
# async task for upload large files
# user: user in the posix file system
# system: system in which the file will be stored (REMOVE later)
# targetPath: absolute path in which to store the file
# sourcePath: absolute path in local FS
# task_id: async task_id created with Tasks microservice
def upload_task(auth_header,system_name, system_addr,targetPath,sourcePath,task_id):
fileName = sourcePath.split("/")[-1]
# container to bind:
container_name = get_username(auth_header)
# use task_id instead of hash_id, since hash_id is no longer needed for (failed) redirection
uploaded_files[task_id] = {"user": container_name,
"system_name": system_name,
"system_addr": system_addr,
"target": targetPath,
"source": fileName,
"status": async_task.ST_URL_ASK,
"hash_id": task_id}
data = uploaded_files[task_id]
global staging
data["msg"] = "Waiting for Presigned URL to upload file to staging area ({})".format(staging.get_object_storage())
# change to dictionary containing upload data (for backup purposes) and adding url call
update_task(task_id, auth_header, async_task.ST_URL_ASK, data, is_json=True)
# check if staging token is valid
if not staging.is_token_valid():
if not staging.authenticate():
data = uploaded_files[task_id]
msg = "Staging Area auth error, try again later"
data["msg"] = msg
data["status"] = async_task.ERROR
update_task(task_id, auth_header, async_task.ERROR, data, is_json=True)
return
# create or return container
if not staging.is_container_created(container_name):
errno = staging.create_container(container_name)
if errno == -1:
data = uploaded_files[task_id]
msg="Could not create container {container_name} in Staging Area ({staging_name})".format(container_name=container_name, staging_name=staging.get_object_storage())
data["msg"] = msg
data["status"] = async_task.ERROR
update_task(task_id,auth_header,async_task.ERROR,data,is_json=True)
return
object_prefix = task_id
# create temporary upload form
resp = staging.create_upload_form(sourcePath, container_name, object_prefix, STORAGE_TEMPURL_EXP_TIME, STORAGE_MAX_FILE_SIZE)
data = uploaded_files[task_id]
# create download URL for later download from Object Storage to filesystem
app.logger.info("Creating URL for later download")
download_url = staging.create_temp_url(container_name, object_prefix, fileName, STORAGE_TEMPURL_EXP_TIME)
# create certificate for later download from OS to filesystem
app.logger.info("Creating certificate for later download")
options = f"-s -G -o {targetPath}/{fileName} -- '{download_url}'"
exp_time = STORAGE_TEMPURL_EXP_TIME
certs = create_certificate(auth_header, system_name, system_addr, "curl", options, exp_time)
if not certs[0]:
data = uploaded_files[task_id]
msg="Could not create credentials for download from Staging Area to filesystem"
app.logger.error(msg)
data["msg"] = msg
data["status"] = async_task.ERROR
update_task(task_id,auth_header,async_task.ERROR,data,is_json=True)
return
# converts file to string to store in Tasks
cert_pub = file_to_str(fileName=certs[0])
# key_pub = file_to_str(fileName=certs[1])
# key_priv = file_to_str(fileName=certs[2])
temp_dir = certs[3]
# encrypt certificate with CERT_CIPHER_KEY key
cipher = Fernet(CERT_CIPHER_KEY)
# data to be encrypted should be encoded to bytes
# in order to save it as json, the cert encrypted should be decoded to string
cert_pub_enc = cipher.encrypt(cert_pub.encode('utf-8')).decode('utf-8')
resp["download_url"] = download_url
resp["action"] = f"curl {options}"
resp["cert"] = [cert_pub_enc, temp_dir]
data["msg"] = resp
data["status"] = async_task.ST_URL_REC
app.logger.info("Cert and url created correctly")
update_task(task_id,auth_header,async_task.ST_URL_REC,data,is_json=True)
return
# upload API entry point:
@app.route("/xfer-external/upload",methods=["POST"])
@check_auth_header
def upload_request():
auth_header = request.headers[AUTH_HEADER_NAME]
system_addr = EXT_TRANSFER_MACHINE_INTERNAL
system_name = EXT_TRANSFER_MACHINE_PUBLIC
targetPath = request.form["targetPath"] # path to save file in cluster
sourcePath = request.form["sourcePath"] # path from the local FS
if system_addr == None or system_addr == "":
data = jsonify(error="System not set in request")
return data, 400
if targetPath == None or targetPath == "":
data = jsonify(error="Target path not set in request")
return data, 400
if sourcePath == None or sourcePath == "":
data = jsonify(error="Source path not set in request")
return data, 400
# checks if targetPath is a valid directory
check = is_valid_dir(targetPath, auth_header, system_name, system_addr)
if not check["result"]:
return jsonify(description="targetPath error"), 400, check["headers"]
# obtain new task from Tasks microservice
task_id = create_task(auth_header,service="storage")
if task_id == -1:
return jsonify(error="Error creating task"), 400
# asynchronous task creation
try:
update_task(task_id, auth_header,async_task.QUEUED)
aTask = threading.Thread(target=upload_task,
args=(auth_header,system_name, system_addr,targetPath,sourcePath,task_id))
storage_tasks[task_id] = aTask
storage_tasks[task_id].start()
task_url = "{kong_url}/tasks/{task_id}".format(kong_url=KONG_URL,task_id=task_id)
data = jsonify(success="Task created",task_url=task_url,task_id=task_id)
return data, 201
except Exception as e:
data = jsonify(error=e)
return data, 400
## Internal Transfer MicroServices:
## cp / rm / mv / rsync using Jobs microservice
# executes system cp/mv/rm or rsync (xfer-internal)
# creates a sbatch file to execute in --partition=xfer
# user_header for user identification
# command = "cp" "mv" "rm" "rsync"
# sourcePath = source object path
# targetPath = in "rm" command should be ""
# jobName = --job-name parameter to be used on sbatch command
# jobTime = --time parameter to be used on sbatch command
# stageOutJobId = value to set in --dependency:afterok parameter
# account = value to set in --account parameter
def exec_internal_command(auth_header,command,sourcePath, targetPath, jobName, jobTime, stageOutJobId, account):
action = "{command} {sourcePath} {targetPath}".\
format(command=command, sourcePath=sourcePath, targetPath=targetPath)
try:
td = tempfile.mkdtemp(prefix="job")
sbatch_file = open(td + "/sbatch-job.sh", "w")
sbatch_file.write("#! /bin/bash -l\n")
sbatch_file.write("#SBATCH --job-name={jobName}\n".format(jobName=jobName))
sbatch_file.write("#SBATCH --time={jobTime}\n".format(jobTime=jobTime))
sbatch_file.write("#SBATCH --error=job-%j.err\n")
sbatch_file.write("#SBATCH --output=job-%j.out\n")
sbatch_file.write("#SBATCH --ntasks=1\n")
sbatch_file.write("#SBATCH --partition={xfer}\n".format(xfer=XFER_PARTITION))
# test line for error
# sbatch_file.write("#SBATCH --constraint=X2450\n")
if stageOutJobId != None:
sbatch_file.write("#SBATCH --dependency=afterok:{stageOutJobId}\n".format(stageOutJobId=stageOutJobId))
if account != None:
app.logger.info(account)
sbatch_file.write(f"#SBATCH --account={account}")
sbatch_file.write("\n")
sbatch_file.write("echo -e \"$SLURM_JOB_NAME started on $(date): {action}\"\n".format(action=action))
sbatch_file.write("srun -n $SLURM_NTASKS {action}\n".format(action=action))
sbatch_file.write("echo -e \"$SLURM_JOB_NAME finished on $(date)\"\n")
sbatch_file.close()
except IOError as ioe:
app.logger.error(ioe)
result = {"error": 1, "msg": str(ioe)}
return result
# create xfer job
resp = create_xfer_job(STORAGE_JOBS_MACHINE, auth_header, td + "/sbatch-job.sh")
# remove sbatch file and dir
os.remove(td + "/sbatch-job.sh")
os.rmdir(td)
return resp
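# Hedged example of the generated script (paths are placeholders): for command
# "cp --force -dR --preserve=all -- ", sourcePath="/scratch/a" and targetPath="/scratch/b",
# sbatch-job.sh looks roughly like:
#
#   #! /bin/bash -l
#   #SBATCH --job-name=cp-job
#   #SBATCH --time=02:00:00
#   #SBATCH --error=job-%j.err
#   #SBATCH --output=job-%j.out
#   #SBATCH --ntasks=1
#   #SBATCH --partition=<XFER_PARTITION>
#   echo -e "$SLURM_JOB_NAME started on $(date): cp --force -dR --preserve=all -- /scratch/a /scratch/b"
#   srun -n $SLURM_NTASKS cp --force -dR --preserve=all -- /scratch/a /scratch/b
#   echo -e "$SLURM_JOB_NAME finished on $(date)"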
# Internal cp transfer via SLURM with xfer partition:
@app.route("/xfer-internal/cp", methods=["POST"])
@check_auth_header
def internal_cp():
return internal_operation(request, "cp")
# Internal mv transfer via SLURM with xfer partition:
@app.route("/xfer-internal/mv", methods=["POST"])
@check_auth_header
def internal_mv():
return internal_operation(request, "mv")
# Internal rsync transfer via SLURM with xfer partition:
@app.route("/xfer-internal/rsync", methods=["POST"])
@check_auth_header
def internal_rsync():
return internal_operation(request, "rsync")
# Internal rm transfer via SLURM with xfer partition:
@app.route("/xfer-internal/rm", methods=["POST"])
@check_auth_header
def internal_rm():
return internal_operation(request, "rm")
# common code for internal cp, mv, rsync, rm
def internal_operation(request, command):
auth_header = request.headers[AUTH_HEADER_NAME]
system_idx = SYSTEMS_PUBLIC.index(STORAGE_JOBS_MACHINE)
system_addr = SYS_INTERNALS_UTILITIES[system_idx]
system_name = STORAGE_JOBS_MACHINE
try:
targetPath = request.form["targetPath"] # path to save file in cluster
if targetPath == "":
return jsonify(error="targetPath is empty"), 400
except:
app.logger.error("targetPath not specified")
return jsonify(error="targetPath not specified"), 400
# using actual_command to add options to check sanity of the command to be executed
actual_command = ""
if command in ['cp', 'mv', 'rsync']:
try:
sourcePath = request.form["sourcePath"] # path to get file in cluster
if sourcePath == "":
return jsonify(error="sourcePath is empty"), 400
except:
app.logger.error("sourcePath not specified")
return jsonify(error="sourcePath not specified"), 400
# checks that the source (sourcePath) and the destination directory of the copy, move or rsync are valid paths
# remove the last part of the path (after last "/" char) to check if the dir can be written by user
_targetPath = targetPath.split("/")[:-1]
_targetPath = "/".join(_targetPath)
app.logger.info(f"_targetPath={_targetPath}")
check_dir = is_valid_dir(_targetPath, auth_header, system_name, system_addr)
if not check_dir["result"]:
return jsonify(description="targetPath error"), 400, check_dir["headers"]
check_file = is_valid_file(sourcePath, auth_header, system_name, system_addr)
if not check_file["result"]:
check_dir = is_valid_dir(sourcePath, auth_header, system_name, system_addr)
if not check_dir["result"]:
return jsonify(description="sourcePath error"), 400, check_dir["headers"]
if command == "cp":
actual_command = "cp --force -dR --preserve=all -- "
elif command == "mv":
actual_command = "mv --force -- "
else:
actual_command = "rsync -av -- "
elif command == "rm":
# for 'rm' there's no source, set empty to call exec_internal_command(...)
# checks if file or dir to delete (targetPath) is a valid path or valid directory
check_file = is_valid_file(targetPath, auth_header, system_name, system_addr)
if not check_file["result"]:
check_dir = is_valid_dir(targetPath, auth_header, system_name, system_addr)
if not check_dir["result"]:
return jsonify(description="targetPath error"), 400, check_dir["headers"]
sourcePath = ""
actual_command = "rm -rf -- "
else:
return jsonify(error=f"Command {command} not allowed"), 400
try:
jobName = request.form["jobName"] # jobName for SLURM
if jobName == None or jobName == "":
jobName = command + "-job"
app.logger.info("jobName not found, setting default to: {jobName}".format(jobName=jobName))
except:
jobName = command + "-job"
app.logger.info("jobName not found, setting default to: {jobName}".format(jobName=jobName))
try:
jobTime = request.form["time"] # job time, default is 2:00:00 H:M:s
if not job_time.check_jobTime(jobTime):
return jsonify(error="Not supported time format"), 400
except:
jobTime = "02:00:00"
try:
stageOutJobId = request.form["stageOutJobId"] # start after this JobId has finished
except:
stageOutJobId = None
# select index in the list corresponding with machine name
system_idx = SYSTEMS_PUBLIC.index(STORAGE_JOBS_MACHINE)
system_addr = SYS_INTERNALS[system_idx]
app.logger.info(f"USE_SLURM_ACCOUNT: {USE_SLURM_ACCOUNT}")
# get "account" parameter, if not found, it is obtained from "id" command
try:
account = request.form["account"]
except:
if USE_SLURM_ACCOUNT:
username = get_username(auth_header)
id_command = f"timeout {UTILITIES_TIMEOUT} id -gn -- {username}"
resp = exec_remote_command(auth_header, STORAGE_JOBS_MACHINE, system_addr, id_command)
if resp["error"] != 0:
retval = check_command_error(resp["msg"], resp["error"], f"{command} job")
return jsonify(description=f"Failed to submit {command} job", error=retval["description"]), retval["status_code"], retval["header"]
account = resp["msg"]
else:
account = None
# check if machine is accessible by user:
# exec test remote command
resp = exec_remote_command(auth_header, STORAGE_JOBS_MACHINE, system_addr, "true")
if resp["error"] != 0:
error_str = resp["msg"]
if resp["error"] == -2:
header = {"X-Machine-Not-Available": "Machine is not available"}
return jsonify(description=f"Failed to submit {command} job"), 400, header
if in_str(error_str,"Permission") or in_str(error_str,"OPENSSH"):
header = {"X-Permission-Denied": "User does not have permissions to access machine or path"}
return jsonify(description=f"Failed to submit {command} job"), 404, header
retval = exec_internal_command(auth_header, actual_command, sourcePath, targetPath, jobName, jobTime, stageOutJobId, account)
# returns "error" key or "success" key
try:
error = retval["error"]
errmsg = retval["msg"]
desc = retval["desc"]
# headers values cannot contain "\n" strings
return jsonify(error=desc), 400, {"X-Sbatch-Error": errmsg}
except KeyError:
success = retval["success"]
task_id = retval["task_id"]
return jsonify(success=success, task_id=task_id), 201
# function to call SBATCH in --partition=xfer
# uses Jobs microservice API call: POST http://{compute_url}/{machine}
# all calls to cp, mv, rm or rsync are made using the Jobs microservice.
def create_xfer_job(machine,auth_header,fileName):
auth_header = request.headers[AUTH_HEADER_NAME]
files = {'file': open(fileName, 'rb')}
try:
req = requests.post("{compute_url}/jobs/upload".
format(compute_url=COMPUTE_URL),
files=files, headers={AUTH_HEADER_NAME: auth_header, "X-Machine-Name":machine}, verify= (SSL_CRT if USE_SSL else False))
retval = json.loads(req.text)
if not req.ok:
return {"error":1,"msg":retval["description"],"desc":retval["error"]}
return retval
except Exception as e:
app.logger.error(e)
return {"error":1,"msg":e}
@app.route("/status",methods=["GET"])
def status():
app.logger.info("Test status of service")
# TODO: check backend storage service to truthfully respond this request
return jsonify(success="ack"), 200
def create_staging():
# Object Storage object
global staging
staging = None
if OBJECT_STORAGE == "swift":
app.logger.info("Into swift")
from swiftOS import Swift
# Object Storage URL & data:
SWIFT_URL = os.environ.get("F7T_SWIFT_URL")
SWIFT_API_VERSION = os.environ.get("F7T_SWIFT_API_VERSION")
SWIFT_ACCOUNT = os.environ.get("F7T_SWIFT_ACCOUNT")
SWIFT_USER = os.environ.get("F7T_SWIFT_USER")
SWIFT_PASS = os.environ.get("F7T_SWIFT_PASS")
url = "{swift_url}/{swift_api_version}/AUTH_{swift_account}".format(
swift_url=SWIFT_URL, swift_api_version=SWIFT_API_VERSION, swift_account=SWIFT_ACCOUNT)
staging = Swift(url=url, user=SWIFT_USER, passwd=SWIFT_PASS, secret=SECRET_KEY)
elif OBJECT_STORAGE == "s3v2":
app.logger.info("Into s3v2")
from s3v2OS import S3v2
# For S3:
S3_URL = os.environ.get("F7T_S3_URL")
S3_ACCESS_KEY = os.environ.get("F7T_S3_ACCESS_KEY")
S3_SECRET_KEY = os.environ.get("F7T_S3_SECRET_KEY")
staging = S3v2(url=S3_URL, user=S3_ACCESS_KEY, passwd=S3_SECRET_KEY)
elif OBJECT_STORAGE == "s3v4":
app.logger.info("Into s3v4")
from s3v4OS import S3v4
# For S3:
S3_URL = os.environ.get("F7T_S3_URL")
S3_ACCESS_KEY = os.environ.get("F7T_S3_ACCESS_KEY")
S3_SECRET_KEY = os.environ.get("F7T_S3_SECRET_KEY")
staging = S3v4(url=S3_URL, user=S3_ACCESS_KEY, passwd=S3_SECRET_KEY)
else:
app.logger.warning("No Object Storage for staging area was set.")
def get_upload_unfinished_tasks():
# cleanup upload dictionary
global uploaded_files
uploaded_files = {}
app.logger.info("Staging Area Used: {}".format(staging.url))
app.logger.info("ObjectStorage Technology: {}".format(staging.get_object_storage()))
try:
# query Tasks microservice for previous tasks. Allow 30 seconds to answer
# only unfinished upload process
status_code = [async_task.ST_URL_ASK, async_task.ST_URL_REC, async_task.ST_UPL_CFM, async_task.ST_DWN_BEG, async_task.ST_DWN_ERR]
retval=requests.get(f"{TASKS_URL}/taskslist", json={"service": "storage", "status_code":status_code}, timeout=30, verify=(SSL_CRT if USE_SSL else False))
if not retval.ok:
app.logger.error("Error getting tasks from Tasks microservice")
app.logger.warning("TASKS microservice is down")
app.logger.warning("STORAGE microservice will not be fully functional")
app.logger.warning(f"Next try in {STORAGE_POLLING_INTERVAL} seconds")
return
queue_tasks = retval.json()
# queue_tasks structure: "tasks"{
# task_{id1}: {..., data={} }
# task_{id2}: {..., data={} } }
# data is the field containing the upload information of each task
queue_tasks = queue_tasks["tasks"]
n_tasks = 0
for key,task in queue_tasks.items():
task = json.loads(task)
# iterating over queue_tasks
try:
data = task["data"]
# check if the task is an unfinished /xfer-external/upload that was downloading
# from SWIFT to the filesystem and crashed before the download finished,
# so it can be re-initiated with /xfer-external/upload-finished
# In that way it is marked as erroneous
if task["status"] == async_task.ST_DWN_BEG:
task["status"] = async_task.ST_DWN_ERR
task["description"] = "Storage has been restarted, process will be resumed"
update_task(task["hash_id"], None, async_task.ST_DWN_ERR, data, is_json=True)
uploaded_files[task["hash_id"]] = data
n_tasks += 1
except KeyError as e:
app.logger.error(e)
app.logger.error(task["data"])
app.logger.error(key)
except Exception as e:
# app.logger.error("hash_id={hash_id}".format(hash_id=data["hash_id"]))
app.logger.error(data)
app.logger.error(e)
app.logger.error(type(e))
app.logger.info("Not finished upload tasks recovered from taskpersistance: {n}".format(n=n_tasks))
except Exception as e:
app.logger.warning("TASKS microservice is down")
app.logger.warning("STORAGE microservice will not be fully functional")
app.logger.error(e)
def init_storage():
# should check Tasks tasks that belong to storage
create_staging()
get_upload_unfinished_tasks()
if __name__ == "__main__":
# log handler definition
# timed rotation: 1 (interval) rotation per day (when="D")
logHandler = TimedRotatingFileHandler('/var/log/storage.log', when='D', interval=1)
logFormatter = logging.Formatter('%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
'%Y-%m-%dT%H:%M:%S')
logHandler.setFormatter(logFormatter)
logHandler.setLevel(logging.DEBUG)
# get app log (Flask+werkzeug+python)
logger = logging.getLogger()
# set handler to logger
logger.addHandler(logHandler)
# checks QueuePersistence and retakes all tasks
init_storage()
# asynchronously checks uploaded_files for completed downloads to the FS
upload_check = threading.Thread(target=check_upload_files)
upload_check.start()
if USE_SSL:
app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT, ssl_context=(SSL_CRT, SSL_KEY))
else:
app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT)
|
voice_controller.py
|
from homeserver.voice_control.google_speech import GoogleVoiceRecognition
from homeserver.voice_control.snowboy.snowboydecoder import HotwordDetector, play_audio_file
#make the voicecontrol follow the device interface structure for control
from homeserver.interface import DeviceInterface, DeviceTarget
# import the DeviceCommand
from homeserver.command_handler import DeviceCommand
from homeserver import app, logger, device_handler
import datetime
import threading
class VoiceThread(threading.Thread):
def __init__(self, parent=None, **kvargs):
self.parent = parent
super(VoiceThread, self).__init__(**kvargs)
class VoiceController(DeviceInterface):
def __init__(self, start=True):
#### variables for the DeviceInterface ###
self.name="Voice Control"
self.connected = False
self.is_on = False
self.running = False
self._devices = []
self.targets = {'voice_control'}  # a set with one target name; set('voice_control') would split the string into characters
self.commands = [DeviceTarget(self.targets, "toggle", self.toggle_detection)]
self.dev_id = 200000 #TODO: read this from some config or smth
### ############# ###
self.google_recognizer = GoogleVoiceRecognition(app.config['GOOGLE_CREDENTIALS'])
# a list of strings to help Google speech-to-text
self.google_keyphrases = device_handler.get_voice_keys()
self.interrupted = False
# some parameters, seem okay for a two-word command
self.silent_count_threshold = 2
self.recording_timeout = 10
# param to the snowboy detector
self.sensitivity = 0.5
self.model = app.config['SNOWBOY_MODEL']
self.recording_path = app.config['AUDIO_PATH_AFTER_DETECTION']
# the keyword detector is initialized in the start detector
self.detector = None
self.vthread = None
self.voice_callbacks = {}
if start:
self.start_detector()
def initialize_detector(self):
logger.info("model path: {}".format(self.model))
self.detector = HotwordDetector(self.model, sensitivity=self.sensitivity)
#set the path of the audio file saved
self.detector.set_recording_filepath(self.recording_path)
#the voicethread
self.vthread = VoiceThread(target=self._start_detection, parent=self)
def start_detector(self):
"""
Method to be called outside the VoiceController class to start
the detection.
"""
self.initialize_detector()
self.vthread.start()
self.is_on = True
self.connected = True
self.running = True
logger.info('Keyword detector started')
def _start_detection(self):
# main loop
self.detector.start(detected_callback=self.detection_callback,
interrupt_check=self.interrupt_callback,
sleep_time=0.03,
audio_recorder_callback=self.audio_recorded_callback,
silent_count_threshold=self.silent_count_threshold,
recording_timeout=self.recording_timeout)
def detection_callback(self):
"""This is called when the hot word is detected, this just logs the time
keyword is detected. The actual handling is done after audio is recorder
in audio detection callback
"""
logger.debug("Keyword detected at {}".format(datetime.datetime.now().isoformat() ) )
def audio_recorded_callback(self, fname):
"""
Called after the keyword is detected and an audio clip has been recorded and saved;
recognizes what was said and then acts on the interpreted audio
"""
command_string = self.google_recognizer.interpret_command(fname,
keyphrases=self.google_keyphrases)
logger.debug("command_string: {}".format(command_string))
if command_string:
command = DeviceCommand.command_from_string(command_string)
logger.debug("sending command to device_handler: {}".format(command))
device_handler.handle_voice_command(command)
def toggle_detection(self):
if self.running:
self.stop_detection()
else:
self.start_detector()
def stop_detection(self):
logger.info("Stopping voice detection")
self.interrupted = True
self.vthread.join()
self.running = False
self.is_on = False
logger.info("Voice detection halted")
def interrupt_callback(self):
return self.interrupted
def command_subjects(self,command, *args):
"""Base methods, common error checking for all base classes implemented here"""
super().command_subjects(command)
#parse command
if len(command.arguments) < 1:
return
action = command.arguments[0]
func = None
# match the action in the command to the commands of this class
for cmd in self.commands:
if action == cmd.action:
func = cmd.action_func
break
if func is not None:
func()
|
server.py
|
from cryptoran import blockcipher, keyexchange
from PigeonConnection import PigeonConnection
import socket, select, sys, threading
class PigeonServer(PigeonConnection):
'''
Multithreaded TCP socket server. Provides encrypted communication
'''
def __init__(self, ip: str, port: int, messageHandler: callable,
connectionHandler: callable, maxConnections: int, cipherIV = None, cipher = None):
# Network
self.bufferSize = 1024
self.clients = {}
self.nextId = 0
self.serverRunning = True
self.maxConnections = maxConnections
self.connectionHandler = connectionHandler
self.printLock = threading.Lock()
# Encryption
self.cipher = None
if cipher:
self.dh = keyexchange.DiffieHellman(primeLength=256)
self.dhParams = self.dh.generateSecret()
self.BlockCipher = cipher
self.iv = cipherIV
self.cipher = cipher
# Configuration
self.serverSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not self.serverSock:
raise IOError('Could not create the server socket')
try:
self.serverSock.bind((ip, port))
except socket.error as err:
raise IOError(f'Address binding failed. Error:\n{err}')
super().__init__(ip, port, messageHandler)
def listen(self):
# start listening for max <self.maxConnections> connections to the server socket
self.serverSock.listen(self.maxConnections)
while self.serverRunning:
# check if there is an incoming connection to the server
readable, _, _ = select.select([self.serverSock], [], [], 0.1)
for incoming in readable:
if incoming == self.serverSock:
# accept the connection
client, addr = self.serverSock.accept()
client.settimeout(120)
cipher = None
if self.cipher:
# negotiate on key using Diffie Hellman protocol
# 1 - send dh params to client
self.sendUnencrypted(self.dhParams, client)
# 2 - listen for Diffie-Hellman input
dhRaw = client.recv(512)
dhInput = self.decodeUnencrypted(dhRaw)
sharedKey = self.dh.generateSharedKey(dhInput)
cipher = self.BlockCipher('cbc', sharedKey, self.iv)
clientPair = (client, cipher)
self.clients[self.nextId] = clientPair
# start a new thread listening for incoming messages from the new client
threading.Thread(target=self.listenToClient, args=(addr, self.nextId, clientPair)).start()
# expose the new connection to class user
self.connectionHandler(addr, self.nextId)
self.nextId += 1
print('stopped listening for incoming TCP connections') # debug
def listenToClient(self, addr: str, id: int, clientPair):
self.sendMessage('Thanks for connecting, your id is ' + str(id), clientPair)
while self.serverRunning:
try:
if self.clients[id] is None: # another method has requested disconnection of client
raise Exception()
readable, _, _ = select.select([clientPair[0]], [], [], 0.1)
for _ in readable:
payload = clientPair[0].recv(self.bufferSize)
if not payload:
raise Exception()
else:
plaintext = self.decodeReceived(payload, clientPair[1])
with self.printLock:
self.messageHandler(id, addr, plaintext)
except:
clientPair[0].close()
print('TCP socket connection to client', id, 'is terminated')
del self.clients[id]
return
print(f'stopped listening client {id}')
def send(self, message: str, clientId: int):
self.sendMessage(message, self.clients[clientId])
def stop(self):
self.serverRunning = False
for clientId in self.clients.keys():
self.clients[clientId] = None
def start(self):
self.listen()
def broadcast(self, message: str):
for clientPair in self.clients.values():
try:
self.sendMessage(message, clientPair)
except:
continue
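# Hedged sketch of the client side of the key negotiation above (illustration only; the real
# client lives in PigeonConnection and is not shown here):
#
#   1. receive the server's Diffie-Hellman parameters, sent unencrypted right after accept()
#   2. compute the shared key from its own secret and send its public value back
#   3. build the same block cipher, e.g. cipher = blockcipher.AES('cbc', sharedKey, iv)
#   4. from then on every message is encrypted/decrypted with that cipher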
if __name__ == '__main__':
aesiv = 0xed7ef412977a7df3af9e67307bd2214b
ip, port = None, None
unsafe = False
try:
ip = sys.argv[1]
port = int(sys.argv[2])
if len(sys.argv) > 3 and sys.argv[3] == '--unsafe':
unsafe = True
except:
print('usage: python server.py ip port [--unsafe]')
print(sys.argv)
sys.exit()
server = None
def messageHandler(id, address, message):
print(f'{str(address)} [id - {id}]: {message}')
def connectionHandler(address, id):
print(f'{address} connected - id: {id}')
# configure cipher and server
try:
if unsafe:
server = PigeonServer(ip, port, messageHandler, connectionHandler, 5)
else:
server = PigeonServer(ip, port, messageHandler, connectionHandler, 5, aesiv, blockcipher.AES)
except IOError as err:
print('Error during server initialization:')
print(err)
sys.exit(1)
# start listening
print(f'listening on port {port}')
threading.Thread(target=server.start).start()
print(f'You may enter a broadcast message.\nEnter ".exit" or KeyboardInterrupt to destroy server')
while True:
try:
message = input()
if message == '.exit':
raise KeyboardInterrupt
server.broadcast(message)
except KeyboardInterrupt:
print('\nTerminating server')
server.stop()
break
print('main thread dying')
|
test_classifier_process.py
|
from pylsl import StreamInfo, StreamOutlet, StreamInlet, resolve_stream
from multiprocessing import Process
from time import sleep, time
from BIpy.bci.classifier_process import ClassifierProcess
from BIpy.bci.inlets import ClassifierInlet
from BIpy.bci.models import DummyClassifier
import math
def send_data():
# define the lsl stream
print('creating stream \"test_input\"...')
info = StreamInfo(source_id='test_input')
outlet = StreamOutlet(info)
print('done')
while True:
sleep(.2)
sample = time()
# print('test_input sends:', sample)
outlet.push_sample([sample])
def test_ClassifierProcess():
# run lsl input stream
sproc = Process(target=send_data)
sproc.start()
sleep(2)
# make and run classifier process
cproc = ClassifierProcess(DummyClassifier(), 'test_input', window_size=10)
cproc.start()
sleep(2)
# make classifier inlet
cinlet = ClassifierInlet()
# sleep(5)
# send data, assert it equals the data received by the classifier inlet
for _ in range(10):
htime, ttime = time(), cinlet.pull_sample()[0][0]
print(htime, ttime)
assert math.isclose(htime, ttime, abs_tol=1e2)
# terminate process
sproc.terminate()
sproc.join()
cproc.terminate()
cproc.join()
|
main.py
|
# Source: https://github.com/pytorch/examples/tree/master/mnist_hogwild
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torchvision import transforms
from load.load import DataLoader
from store.cifar10 import Cifar10
from train import train_pytorch, train_one_access, test
import time
ONE_ACCESS = 1
PYTORCH = 2
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--rel-sample-size', type=int, default=500, metavar='N',
help="relative sample size for sample creator (default: 500)")
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--num-processes', type=int, default=2, metavar='N',
help='how many training processes to use (default: 2)')
parser.add_argument('--loader', type=int, default=ONE_ACCESS,
help='Which data loader to use')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
model = Net()
model.share_memory() # gradients are allocated lazily, so they are not shared here
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
start = time.time()
if args.loader == ONE_ACCESS:
# NOTE: pass the correct input data folder (preferably full path) with dataset already downloaded
data_store = Cifar10(
input_data_folder="~/datasets/cifar-10-batches-py", \
max_batches=4, batch_size=args.batch_size, \
rel_sample_size=args.rel_sample_size, max_samples=1, \
transform=transform)
data_store.initialize()
data_loader = DataLoader(data_store, epochs=(args.epochs*args.num_processes))
processes = []
for rank in range(args.num_processes):
if args.loader == ONE_ACCESS:
p = mp.Process(target=train_one_access, args=(args, model, data_loader))
# train_one_access(args, model, data_loader)
else:
p = mp.Process(target=train_pytorch, args=(rank, args, model))
# train_pytorch(rank, args, model)
p.start()
processes.append(p)
for p in processes:
p.join()
end = time.time()
print(end-start)
if args.loader == ONE_ACCESS:
data_loader.stop_batch_creation()
# Once training is complete, we can test the model
test(args, model)
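# Hedged usage note (the flags are defined above; the dataset path hard-coded in the Cifar10
# store is an assumption that must already exist locally):
#
#   python main.py --loader 1 --num-processes 2 --epochs 1 --batch-size 32
#
# runs the OneAccess loader branch; --loader 2 falls back to the plain PyTorch hogwild loader.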
|
train_pg_f18_parallel.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
import multiprocessing
import math as m
import ipdb as pdb
import sys
sys.path.append("../../../gravity_ball_game/")
from gravity_ball_game_training_simulator import GB_game
from forked_pdb import ForkedPdb
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation='tf.tanh', output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
            output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
    # allow activations to be given either as callables or as strings such as 'tf.tanh'
    if isinstance(activation, str):
        activation = eval(activation)
    if isinstance(output_activation, str):
        output_activation = eval(output_activation)
    if not isinstance(activation, list):
        activation = [activation]
    if not isinstance(size, list):
        size = [size]
    fc_layer = input_placeholder
    with tf.variable_scope(scope):
        for i in range(n_layers-1): # reuse the last listed size/activation if fewer were given than hidden layers
            fc_layer = tf.contrib.layers.fully_connected(fc_layer, size[min(i, len(size)-1)], weights_regularizer=tf.contrib.layers.l2_regularizer(0.05), activation_fn=activation[min(i, len(activation)-1)])
output_placeholder = tf.contrib.layers.fully_connected(fc_layer, output_size,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.05),
activation_fn=output_activation)
# raise NotImplementedError
return output_placeholder
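# An alternative minimal sketch of the same network using tf.layers.dense, as the hint in the
# docstring above suggests; it assumes a single hidden width for every layer rather than the
# per-layer size list handled by build_mlp, and it is not called anywhere below.
def build_mlp_dense_sketch(input_placeholder, output_size, scope, n_layers, size,
                           activation=None, output_activation=None):
    out = input_placeholder
    with tf.variable_scope(scope):
        for _ in range(n_layers):
            out = tf.layers.dense(out, size, activation=activation)
        return tf.layers.dense(out, output_size, activation=output_activation)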
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
import tensorflow as tf
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.output_activation = computation_graph_args['output_activation']
self.learning_rate = computation_graph_args['learning_rate']
self.baseline_lr = computation_graph_args['baseline_lr']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
            Placeholders for batch observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
# raise NotImplementedError
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR CODE HERE
sy_adv_n = tf.placeholder(shape = [None], name = 'adv',dtype = tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
# raise NotImplementedError
if self.discrete:
# YOUR_CODE_HERE
            sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'mlp', self.n_layers, self.size, activation=tf.tanh, output_activation=self.output_activation)
# Right, we want output activation = none, bc these are "logits", which in tf-language means the unscaled inputs to the softmax function
return sy_logits_na
else:
# YOUR_CODE_HERE
            sy_mean = build_mlp(sy_ob_no, self.ac_dim, 'mlp', self.n_layers, self.size, activation=tf.tanh, output_activation=self.output_activation)
# Note this will be a vector of means of length self.ac_dim
sy_logstd = tf.get_variable(name = 'std_vec', shape = self.ac_dim)
# weird! We'll separately train the std, not letting it depend on the network. Seems maybe there should be a separate network for this guy.
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I). ### Question: What the heck? How do I prove this? DO IT LATER. It makes intuitive sense!
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
# raise NotImplementedError
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na,1),axis = 1) # only wanna squeeze out one dimension
# Question: should there be a tf.log in here for these unscaled logits? I feel
# So what I've found out: if you do samples = tf.multinomial(tf.log([[0.4,0.5,0.1]]),100000), you get samples in proportion to the probabilities therein
# OKAY: tf.multinomial(tf.log([[x1,x2,x3]])) = [x1/sum(x),x2/sum(x)...] probability distribution.
# So therefore tf.multinomial([[a1,a2,a3]]) = [e^a1/sum(e^a),...] distribution. So yes, just input the raw outputs.
#it takes the exponential of each entry and makes a prob dist of those.
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = tf.add(sy_mean,tf.multiply(sy_logstd,tf.random_normal(tf.shape(sy_mean)))) # note I'm treating sy_logstd as std, not log(std)
return sy_sampled_ac
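    # Minimal numpy sketch of the reparameterization trick described in the hint above,
    # assuming a diagonal Gaussian: samples = mu + sigma * z with z ~ N(0, I). Not used by
    # the training code; the empirical mean/std of the samples should come out near mu/sigma.
    @staticmethod
    def _reparameterization_sketch():
        mu = np.array([0.5, -1.0])
        sigma = np.array([0.1, 0.3])
        z = np.random.randn(10000, 2)
        samples = mu + sigma * z
        return samples.mean(axis=0), samples.std(axis=0)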
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
Question: Won't the probability be approaching zero for any set of actions? and thus log_prob = -inf?
I think we are substituting pdf for probability here. Odd.
AH! It doesn't matter! What we care about ultimately is the grad(log_prob(at|st)),
and dlog_p/dtheta = dlog_p/dlog_pdf * dlog_pdf/dtheta = 1 * dlog_pdf/dtheta (prove dlog_p/dlog_pdf = 1, SOME-OTHER-TIME!!!)
But in this case, we can just get log_pdf and later when gradient is taken it = gradient of log_p
KEY: I am pretty certain that by saying sy_logstd [=] [action_space_dim, ], that means we are using an identity for covariance matrix
So therefore we assume each dimension of the gaussian is independent from the others and thus total_pdf = pdf_dim1*pdf_dim2 * ... * pdfdim_n
Or is this just saying that we don't know the covariance matrix???
I'm pretty sure this is a messed up assumption, bc the dimensions of the output action are definitely not independent, as they share
most of a neural network in common. Thus I think this wouldn't actually give the correct gradient to maximize the probability of the
action taken. Must be close enough?
Notes: I believe the probability for an action is given by the softmax function. That's how tf.multinomial interpreted those "logits" inputs
"""
# raise NotImplementedError
if self.discrete:
sy_logits_na = policy_parameters
softmaxed_logits = tf.nn.softmax(sy_logits_na) #gives [batch_size,action_space] vector
# For each entry in batch, select the appropriate chosen action. sy_ac_na is [batch_size,]
indexer = tf.stack([tf.range(0,tf.shape(sy_ac_na)[0],1), sy_ac_na], axis = 1) # Makes the [[0,a0],[1,a1],...] array
probs_of_chosen_actions = tf.gather_nd(softmaxed_logits,indexer) # gets the responsible action in each row. vector is [batch,] = [p_a1 p_a2 p_a3 ...]
# each element of indexer ([k, a_k]) selects the k row and a_k column of softmaxed_logits
sy_logprob_n = tf.log(probs_of_chosen_actions) # So flame...
# But this entire method is less stable than softmax_cross_entropy_with_logits... Lunar lander before did it the same way... RIGHT?
else:
sy_mean, sy_logstd = policy_parameters
# What needs to happen is I need to take these chosen sy_means, which are [batches,action_space], and get the probability of each action
# in each batch sample, using the sy_mean, which is [batches,action_space] and sy_logstd, which is [action_space, ]
# I then multiple the entire row of probabilities to get the total probability, and then take the log of that. Tomorrow;)
sigma_square = tf.square(sy_logstd)
diff_mat = tf.subtract(sy_ac_na,sy_mean)
two_pi = tf.constant(2*m.pi,dtype = tf.float32)
first_term = tf.divide(tf.cast(1,tf.float32),tf.sqrt(tf.multiply(two_pi,sigma_square)))
second_term = tf.exp(tf.negative(tf.divide(tf.square(diff_mat),tf.multiply(tf.cast(2,tf.float32),sigma_square))))
pdf_output = tf.multiply(first_term,second_term)
log_pdf = tf.log(pdf_output)
sy_logprob_n = tf.reduce_sum(log_pdf,1) # we use sum, bc sum(log_prob) = log(mult(all_probs))
return sy_logprob_n
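    # Illustrative numpy sketch of the diagonal-Gaussian log-probability assembled above
    # (treating sy_logstd as the std itself, as the code does): summing per-dimension log pdfs
    # is the same as taking the log of the product of independent pdfs. Not called anywhere.
    @staticmethod
    def _gaussian_logprob_sketch(ac, mean, std):
        var = np.square(std)
        log_pdf = -0.5 * np.log(2 * np.pi * var) - np.square(ac - mean) / (2 * var)
        return np.sum(log_pdf, axis=1)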
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
            _no - this tensor should have shape (batch size /n/, observation dim)
            _na - this tensor should have shape (batch size /n/, action dim)
            _n - this tensor should have shape (batch size /n/)
        Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
self.unscaled_loss = -tf.reduce_mean(self.sy_logprob_n) # This loss is just the log(a|s), not scaled by the r(path).
# This is a (+) number.
self.s_scaledlogprob_n = tf.multiply(self.sy_logprob_n,self.sy_adv_n) # this assumes the sy_adv_n is taking the total reward or reward to go at each timestep.
self.loss = -tf.reduce_mean(self.s_scaledlogprob_n) # YOUR CODE HERE
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# amazing that we use an entirely separate network to learn the baseline reward prediction!
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
            self.sy_target_n = tf.placeholder(shape=[None], name="target_n", dtype=tf.float32)  # targets are normalized (float) Q-values
            self.baseline_loss = tf.losses.mean_squared_error(self.sy_target_n, self.baseline_prediction) # the target is a continuous value, so mean squared error is a natural choice
if self.baseline_lr is not None:
self.baseline_update_op = tf.train.AdamOptimizer(self.baseline_lr).minimize(self.baseline_loss)
else:
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.baseline_loss)
def parallel_sampler(self, env, temp_save_string):
"""okay, so the plan will be to spawn processes = cpu_number, then have
2 queues. One will store paths and the other will store path lengths.
I forsee an issue: if I use .join() before doing backprop, then the
queue might overload. Well I guess just use that after queue.get() but
before your backprop.
New plan. gotta use Value() instead of q_len.:"""
cpu_count = multiprocessing.cpu_count()
q_path = multiprocessing.Queue()
v_pathlen = multiprocessing.Value('i',0)
paths = []
processes = []
for i in range(1):
job = multiprocessing.Process(name='p_'+str(i),target=self.sample_t_parallel,
args=(env,q_path,v_pathlen,temp_save_string))
print('starting job on cpu ' + str(i))
processes.append(job)
job.start()
proc_alive = True
while proc_alive == True:#this holds it up till all the processes finish, and then we move on into
# emptying the residual paths
alive_list = [proc.is_alive() for proc in processes]
proc_alive = any(alive_list) # will tell me if there's at least one process still living.
while not q_path.empty():
paths.append(q_path.get())
timesteps_this_batch = v_pathlen.value
for job in processes:
job.join()
return paths, timesteps_this_batch
    def sample_t_parallel(self, env, q_path, v_pathlen, temp_save_string):
import tensorflow as tf
timesteps_this_batch = 0
# First thing that needs to happen is loading the model.
# All of this will take place within a new session
ForkedPdb().set_trace()
with tf.Session() as custom_sess:
saver = tf.train.Saver()
saver.restore(custom_sess,temp_save_string) # still this doesn't work. I suspect tensorflow is
#imported in the wrong location!
print('model loaded from '+temp_save_string)
while True:
animate_this_episode=False # we won't animate parallel
# We are going to use ForkedPdb on this portion to start digging into it.
                path = self.sample_trajectory(env, animate_this_episode, custom_sess)
                print('path good') # apparently it's not good!
q_path.put(path)
with multiprocessing.Lock():
v_pathlen.value += pathlength(path)
print('the value of v_pathlen.value is '+str(v_pathlen.value) + ' on '+str(multiprocessing.current_process().name))
if v_pathlen.value > self.min_timesteps_per_batch:
print("v_pathlen = " + str(v_pathlen.value))
break
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
if hasattr(self,'running_only') and self.animate:
animate_this_episode=True
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch #
    def parallel_trajectory(self, env, queue):
        """ So paths is a list of dictionaries.
        Because of this, and because the advantage and sum of rewards are actually computed later, you will not be able to put individual observations into the queue.
        Instead we will need to add entire path dictionaries to the queue. This really shouldn't pose too much of an issue.
        Actually, it might be simpler to have the parallelizing outside of sample_trajectories. """
        path = self.sample_trajectory(env, False)
        queue.put(path)
    def sample_trajectory(self, env, animate_this_episode, custom_sess=None):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
# pdb.set_trace()
env.render()
time.sleep(0.01)
# pdb.set_trace()
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
# raise NotImplementedError
            # ForkedPdb().set_trace()  # debugging breakpoint, disabled so the normal (non-parallel) sampling path can run
if custom_sess is not None:
ac = custom_sess.run(self.sy_sampled_ac,feed_dict = {self.sy_ob_no:[ob]})
else:
ac = self.sess.run(self.sy_sampled_ac,feed_dict = {self.sy_ob_no:[ob]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
q_n = np.array([])
if self.reward_to_go:
# raise NotImplementedError
for re_single_path in re_n:
q_t_vector = [np.sum([(self.gamma**i)*(re_single_path[j+i]) for i in range(len(re_single_path)-j)]) for j in range(len(re_single_path))] # Seems wrong, but i guess we sum this entire thing...
# q_t_vector = [q_t for j in range(len(re_single_path))]
q_n = np.append(q_n,q_t_vector)
else:
# Loops are probably the slower way to do this. maybe even nested comprehensions are slower...
for re_single_path in re_n:
q_t = np.sum([(self.gamma**j)*re_single_path[j] for j in range(len(re_single_path))]) # Seems wrong, but i guess we sum this entire thing...
q_t_vector = [q_t for j in range(len(re_single_path))]
q_n = np.append(q_n,q_t_vector)
return q_n
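    # Illustrative sketch of the reward-to-go case above: a single reverse pass computes
    # Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'} in O(T) instead of the O(T^2) double sum.
    # Not called by the training loop.
    @staticmethod
    def _reward_to_go_sketch(rewards, gamma):
        q = np.zeros(len(rewards))
        running = 0.0
        for t in reversed(range(len(rewards))):
            running = rewards[t] + gamma * running
            q[t] = running
        return q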
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.
# to match the statistics, lets just scale them both to have std = 1 and mean = 0. This means we'll always sorta be normalizing advantages
# I think they maybe meant for me to just multiply the predictions by the std of the q_n and add the mean...
b_n = self.sess.run(self.baseline_prediction, feed_dict = {self.sy_ob_no:ob_no})
scale_mean = np.mean(q_n)
scale_std = np.std(q_n)
b_n = np.add(np.multiply(b_n,scale_std),scale_mean)
# pdb_checker = [np.mean(q_n),np.std(q_n),np.mean(b_n),np.std(b_n)] # Note that this works quite well. Not perfectly. The mean is pretty dead on.
# print(pdb_checker)
# q_n = np.divide(np.subtract(q_n,np.mean(q_n)),np.std(q_n))
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
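    # Illustrative sketch of the Hint #bl1 rescaling used above: the baseline network is fit
    # to normalized targets, so its raw predictions are shifted and scaled back to the mean
    # and std of the current batch of Q-values before being subtracted. Not called directly.
    @staticmethod
    def _baseline_rescale_sketch(b_n_normalized, q_n):
        return b_n_normalized * np.std(q_n) + np.mean(q_n)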
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
# Question::: why do i think this is allowed and doesn't bias the policy gradient?
# Also, i should double check to make sure this actually works.
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# raise NotImplementedError
mean = np.mean(adv_n)
std = np.std(adv_n)
adv_n = np.divide(np.subtract(adv_n,mean),std) # YOUR_CODE_HERE
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths). These are the actions sampled I think.
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# So this is where it actually does the fitting, and we'll fit it to the normalized q_n values.
target_n = np.divide(np.subtract(q_n,np.mean(q_n)),np.std(q_n))
self.batch_baseline_loss = self.sess.run(self.baseline_loss,feed_dict={self.sy_target_n : target_n, self.sy_ob_no : ob_no})
print('the baseline loss is '+ str(self.batch_baseline_loss))
self.sess.run(self.baseline_update_op,feed_dict={self.sy_target_n : target_n, self.sy_ob_no : ob_no})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
# pdb.set_trace()
self.batch_loss = self.sess.run(self.loss, feed_dict = {self.sy_ob_no : ob_no, self.sy_ac_na : ac_na, self.sy_adv_n : adv_n})
print('the loss is '+str(self.batch_loss))
self.batch_unscaled_loss = self.sess.run(self.unscaled_loss, feed_dict = {self.sy_ob_no : ob_no, self.sy_ac_na : ac_na, self.sy_adv_n : adv_n})
print('the unscaled loss is '+str(self.batch_unscaled_loss))
_ = self.sess.run(self.update_op,feed_dict = {self.sy_ob_no : ob_no, self.sy_ac_na : ac_na, self.sy_adv_n : adv_n})
# raise NotImplementedError
def save_models_action(self,save_string):
saver = tf.train.Saver()
save_path = saver.save(self.sess, 'my_save_loc/'+save_string+'.ckpt')
print('save path is '+save_path)
return save_path
def load_models_action(self,save_path):
saver = tf.train.Saver()
saver.restore(self.sess,save_path)
print('model loaded from '+save_path)
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
baseline_lr,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
output_activation,
size,
save_models,
save_best_model,
resume_string,
run_model_only,
script_optimizing_dir,
parallel,
relative_positions,
death_penalty,
reward_circle,
num_enemies,
gb_discrete,
gb_max_speed):
start = time.time()
import tensorflow as tf
if script_optimizing_dir is not None:
logdir = logdir[:5]+script_optimizing_dir+'/'+logdir[5:]
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
if env_name == 'GB_game':
env = GB_game(num_char = num_enemies, reward_circle = reward_circle, death_penalty = death_penalty, relative_positions = relative_positions, discrete=gb_discrete, max_speed=gb_max_speed)
discrete = env.discrete
else:
env = gym.make(env_name)
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# pdb.set_trace()
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'output_activation': output_activation,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'baseline_lr' : baseline_lr,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
# Now we'll try to load if we are only running a model or if we are resuming training.
if run_model_only is not None:
agent.load_models_action(run_model_only)
agent.running_only = True
elif resume_string is not None:
agent.load_models_action(resume_string)
#setup for a parallel training loader.
if parallel is True:
temp_save_string = logdir[5:-2]+'_temp'
print(temp_save_string)
temp_save_string = agent.save_models_action(temp_save_string) #yet another janky way to do this
#========================================================================================#
# Training Loop
#========================================================================================#
best_avg_return = -(5e10)
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
if parallel is True:
paths, timesteps_this_batch = agent.parallel_sampler(env,temp_save_string)
print('so it returned it?')
else:
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
if run_model_only is not None:
continue
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
mean_return = np.mean(returns)
if mean_return > best_avg_return:
best_avg_return = mean_return
if save_best_model==True:
save_string = logdir[5:-2]
agent.save_models_action(save_string)
logz.log_tabular("AverageReturn", mean_return)
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
# My own
if hasattr(agent,'batch_baseline_loss'):
logz.log_tabular("BaselineLoss", agent.batch_baseline_loss)
logz.log_tabular("UnscaledLoss", agent.batch_unscaled_loss)
logz.log_tabular("Loss", agent.batch_loss)
logz.dump_tabular()
logz.pickle_tf_vars()
# if script_optimizing == True:
# print(np.max(returns))
# One potential issue here is that there won't be a local for the first iteration. we must make it
# so.
if parallel is True:
temp_save_string = logdir[5:-2]+'_temp'
agent.save_models_action(temp_save_string)
if save_models == True and save_best_model==False:
save_string = logdir[5:-2]
agent.save_models_action(save_string)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--baseline_lr', '-bllr', type=float, default=None)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--output_activation', type=str, default = None)
parser.add_argument('--size', '-s', type=int, default=64)
#I'm adding this one for my own edification
parser.add_argument('--save_models', action = 'store_true')
parser.add_argument('--save_best_model', action = 'store_true')
parser.add_argument('--resume_string', type = str, default = None) # put the model name that you will resume training from!
parser.add_argument('--run_model_only', type = str, default = None) # This is a string with the model savefile
parser.add_argument('--script_optimizing_dir', type = str, default = None) # use this if doing a bash_script method
parser.add_argument('--parallel', action = 'store_true')
# These 3 are for my game only!
parser.add_argument('--relative_positions', '-rp', action='store_true')
parser.add_argument('--death_penalty', '-dp', action='store_true')
parser.add_argument('--reward_circle', '-rc', action='store_true')
parser.add_argument('--num_enemies', type=int, default = 1)
parser.add_argument('--gb_discrete', action='store_true')
parser.add_argument('--gb_max_speed', type=int, default=20)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
baseline_lr=args.baseline_lr,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
output_activation = args.output_activation,
size=args.size,
save_models = args.save_models,
save_best_model = args.save_best_model,
resume_string = args.resume_string,
run_model_only = args.run_model_only,
script_optimizing_dir=args.script_optimizing_dir,
parallel=args.parallel,
relative_positions = args.relative_positions, # These 3 are only for the game!
death_penalty=args.death_penalty,
reward_circle=args.reward_circle,
num_enemies=args.num_enemies,
gb_discrete=args.gb_discrete,
gb_max_speed=args.gb_max_speed
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
# if args.render == False:
# p = Process(target=train_func, args=tuple())
# p.start()
# processes.append(p)
# # if you comment in the line below, then the loop will block
# # until this process finishes
# # p.join()
# if args.render == False:
# for p in processes:
# p.join()
# else:
train_func() # OH MY GOODNESS! The Render doesn't work if the above isn't commented out, and this line replacing it. Must use this line to render.
if __name__ == "__main__":
main()
# you add new args to the code by putting a new arg in 3 different places.
|
future_test.py
|
import sys
import traceback
import unittest
from threading import Thread, Event
from hazelcast.future import (
Future,
ImmediateFuture,
combine_futures,
make_blocking,
ImmediateExceptionFuture,
)
from hazelcast import six
from hazelcast.six.moves import range
class FutureTest(unittest.TestCase):
def test_set_result(self):
f = Future()
def set_result():
f.set_result("done")
Thread(target=set_result).start()
self.assertEqual(f.result(), "done")
def test_set_exception(self):
f = Future()
exc = []
def set_exception():
try:
{}["invalid_key"]
except KeyError as e:
exc.append(sys.exc_info())
f.set_exception(e, sys.exc_info()[2])
Thread(target=set_exception).start()
exception = f.exception()
traceback = f.traceback()
exc = exc[0]
self.assertEqual(exc[1], exception)
self.assertEqual(exc[2], traceback)
def test_result_raises_exception_with_traceback(self):
f = Future()
exc_info = None
try:
{}["invalid_key"]
except KeyError as e:
exc_info = sys.exc_info()
f.set_exception(e, sys.exc_info()[2])
try:
f.result()
self.fail("Future.result() should raise exception")
except:
info = sys.exc_info()
self.assertEqual(info[1], exc_info[1])
original_tb = traceback.extract_tb(exc_info[2])
# shift traceback to discard the last frames
shift = 2 if six.PY2 else 3
actual_tb = traceback.extract_tb(info[2])[shift:]
self.assertEqual(original_tb, actual_tb)
def test_add_callback_with_success(self):
f = Future()
e = Event()
def set_result():
f.set_result("done")
def callback(future):
self.assertEqual(future.result(), "done")
e.set()
f.add_done_callback(callback)
Thread(target=set_result).start()
self.assertTrue(e.wait(timeout=5))
def test_add_callback_with_failure(self):
f = Future()
e = Event()
exc = []
def set_exception():
map = {}
try:
a = map["invalid_key"]
except KeyError as e:
exc.append(sys.exc_info())
f.set_exception(e, sys.exc_info()[2])
def callback(future):
exc_info = exc[0]
self.assertEqual(exc_info[1], future.exception())
self.assertEqual(exc_info[2], future.traceback())
e.set()
f.add_done_callback(callback)
Thread(target=set_exception).start()
self.assertTrue(e.wait(timeout=5))
def test_add_callback_after_completion_with_success(self):
f = Future()
f.set_result("done")
counter = [0]
def callback(future):
counter[0] += 1
self.assertEqual(future.result(), "done")
f.add_done_callback(callback)
self.assertEqual(counter[0], 1)
def test_add_callback_after_completion_with_error(self):
f = Future()
error = RuntimeError("error")
f.set_exception(error, None)
counter = [0]
def callback(future):
counter[0] += 1
self.assertEqual(future.exception(), error)
f.add_done_callback(callback)
self.assertEqual(counter[0], 1)
def test_get_result_from_reactor_thread(self):
f = Future()
e = Event()
def get_result():
f._threading_locals.is_reactor_thread = True
try:
f.result()
except RuntimeError:
e.set()
Thread(target=get_result).start()
self.assertTrue(e.wait(5), "event was not set")
def test_get_exception_from_reactor_thread(self):
f = Future()
e = Event()
def get_result():
f._threading_locals.is_reactor_thread = True
try:
f.exception()
except RuntimeError:
e.set()
Thread(target=get_result).start()
self.assertTrue(e.wait(5), "event was not set")
def test_continue_with_on_success(self):
f = Future()
f.set_result(1)
def continuation(future):
return future.result() + 1
result = f.continue_with(continuation).result()
self.assertEqual(2, result)
def test_continue_with_on_failure(self):
f = Future()
f.set_exception(RuntimeError("error"))
def continuation(future):
if future.is_success():
return 0
else:
return 1
result = f.continue_with(continuation).result()
self.assertEqual(1, result)
def test_continue_with_with_resolved_future(self):
f1 = Future()
f2 = Future()
f3 = f1.continue_with(lambda _: f2)
f1.set_result(0)
f2.set_result(1)
self.assertEqual(1, f3.result())
def test_continue_with_with_rejected_future(self):
f1 = Future()
f2 = Future()
f3 = f1.continue_with(lambda _: f2)
f1.set_result(0)
f2.set_exception(RuntimeError("error"))
with self.assertRaises(RuntimeError):
f3.result()
def test_continue_with_future_returning_future(self):
f1 = Future()
f2 = Future()
f3 = Future()
f4 = f1.continue_with(lambda _: f2)
f1.set_result(0)
f2.set_result(f3)
f3.set_result(1)
self.assertEqual(1, f4.result())
def test_callback_called_exactly_once(self):
for _ in range(0, 100):
f = Future()
def set_result():
f.set_result("done")
t = Thread(target=set_result)
t.start()
i = [0]
def callback(c):
i[0] += 1
f.add_done_callback(callback)
t.join()
self.assertEqual(i[0], 1)
def test_callback_called_exactly_once_when_exception(self):
for _ in range(0, 100):
f = Future()
def set_exception():
f.set_exception(RuntimeError("error"))
t = Thread(target=set_exception)
t.start()
i = [0]
def callback(c):
i[0] += 1
f.add_done_callback(callback)
t.join()
self.assertEqual(i[0], 1)
def test_done(self):
f = Future()
self.assertFalse(f.done())
f.set_result("done")
self.assertTrue(f.done())
def test_running(self):
f = Future()
self.assertTrue(f.running())
f.set_result("done")
self.assertFalse(f.running())
def test_set_exception_with_non_exception(self):
f = Future()
with self.assertRaises(RuntimeError):
f.set_exception("non-exception")
def test_callback_throws_exception(self):
f = Future()
def invalid_func(_):
raise RuntimeError("error!")
f.add_done_callback(invalid_func)
f.set_result("done") # should not throw exception
def test_continue_with_throws_exception(self):
f = Future()
e = RuntimeError("error")
def continue_func(_):
raise e
n = f.continue_with(continue_func)
f.set_result("done")
self.assertFalse(n.is_success())
self.assertEqual(n.exception(), e)
class ImmediateFutureTest(unittest.TestCase):
f = None
def setUp(self):
self.f = ImmediateFuture("done")
def test_result(self):
self.assertEqual("done", self.f.result())
def test_exception(self):
self.assertIsNone(self.f.exception())
def test_traceback(self):
self.assertIsNone(self.f.traceback())
def test_set_result(self):
with self.assertRaises(NotImplementedError):
self.f.set_result("done")
def test_set_exception(self):
with self.assertRaises(NotImplementedError):
self.f.set_exception(RuntimeError())
    def test_is_success(self):
self.assertTrue(self.f.is_success())
def test_is_done(self):
self.assertTrue(self.f.done())
def test_is_not_running(self):
self.assertFalse(self.f.running())
def test_callback(self):
n = [0]
def callback(f):
self.assertEqual(f, self.f)
n[0] += 1
self.f.add_done_callback(callback)
self.assertEqual(n[0], 1)
class ImmediateExceptionFutureTest(unittest.TestCase):
f = None
def setUp(self):
try:
raise RuntimeError("error")
except:
self.exc = sys.exc_info()[1]
self.traceback = sys.exc_info()[2]
self.f = ImmediateExceptionFuture(self.exc, self.traceback)
def test_result(self):
with self.assertRaises(type(self.exc)):
self.f.result()
def test_exception(self):
self.assertEqual(self.exc, self.f.exception())
def test_traceback(self):
self.assertEqual(self.traceback, self.f.traceback())
def test_set_result(self):
with self.assertRaises(NotImplementedError):
self.f.set_result("done")
def test_set_exception(self):
with self.assertRaises(NotImplementedError):
self.f.set_exception(RuntimeError())
    def test_is_success(self):
self.assertFalse(self.f.is_success())
def test_is_done(self):
self.assertTrue(self.f.done())
def test_is_not_running(self):
self.assertFalse(self.f.running())
def test_callback(self):
n = [0]
def callback(f):
self.assertEqual(f, self.f)
n[0] += 1
self.f.add_done_callback(callback)
self.assertEqual(n[0], 1)
class CombineFutureTest(unittest.TestCase):
def test_combine_futures(self):
f1, f2, f3 = Future(), Future(), Future()
combined = combine_futures([f1, f2, f3])
f1.set_result("done1")
self.assertFalse(combined.done())
f2.set_result("done2")
self.assertFalse(combined.done())
f3.set_result("done3")
self.assertEqual(combined.result(), ["done1", "done2", "done3"])
def test_combine_futures_exception(self):
f1, f2, f3 = Future(), Future(), Future()
combined = combine_futures([f1, f2, f3])
e = RuntimeError("error")
f1.set_result("done")
f2.set_result("done")
f3.set_exception(e)
self.assertEqual(e, combined.exception())
def test_combine_futures_with_empty_list(self):
combined = combine_futures([])
self.assertTrue(combined.done())
self.assertEqual([], combined.result())
class MakeBlockingTest(unittest.TestCase):
class Calculator(object):
def __init__(self):
self.name = "calc"
def add_one(self, x):
f = Future()
f.set_result(x + 1)
return f
def multiply(self, x, y):
f = Future()
f.set_result(x * y)
return f
def multiply_sync(self, x, y):
return x * y
def setUp(self):
self.calculator = make_blocking(MakeBlockingTest.Calculator())
def test_args(self):
self.assertEqual(self.calculator.add_one(1), 2)
def test_kwargs(self):
self.assertEqual(self.calculator.multiply(x=4, y=5), 20)
def test_blocking_method(self):
self.assertEqual(self.calculator.multiply_sync(x=4, y=5), 20)
def test_missing_method(self):
with self.assertRaises(AttributeError):
self.calculator.missing_method()
def test_attribute(self):
self.assertEqual(self.calculator.name, "calc")
|
test_web.py
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from datetime import date
import threading
from django.core.urlresolvers import reverse
from django.db import IntegrityError, connection
from django.test.client import Client
from django.utils import timezone
from rest_framework.test import APITransactionTestCase
from cabot.cabotapp.models import Service, HttpStatusCheck, ServiceStatusSnapshot
from cabot.cabotapp.views import StatusCheckReportForm, ServiceListView
from cabot.cabotapp.tests.utils import LocalTestCase
class TestWebInterface(LocalTestCase):
def setUp(self):
super(TestWebInterface, self).setUp()
self.client = Client()
def test_set_recovery_instructions(self):
# Get service page - will get 200 from login page
resp = self.client.get(reverse('update-service', kwargs={'pk': self.service.id}), follow=True)
self.assertEqual(resp.status_code, 200)
# Log in
self.client.login(username=self.username, password=self.password)
resp = self.client.get(reverse('update-service', kwargs={'pk': self.service.id}))
self.assertEqual(resp.status_code, 200)
self.assertNotIn('username', resp.content)
snippet_link = 'https://sub.hackpad.com/wiki-7YaNlsC11bB.js'
self.assertEqual(self.service.hackpad_id, None)
resp = self.client.post(
reverse('update-service', kwargs={'pk': self.service.id}),
data={
'name': self.service.name,
'hackpad_id': snippet_link,
},
follow=True,
)
self.assertEqual(resp.status_code, 200)
reloaded = Service.objects.get(id=self.service.id)
self.assertEqual(reloaded.hackpad_id, snippet_link)
# Now one on the blacklist
blacklist_link = 'https://unapproved_link.domain.com/wiki-7YaNlsC11bB.js'
resp = self.client.post(
reverse('update-service', kwargs={'pk': self.service.id}),
data={
'name': self.service.name,
'hackpad_id': blacklist_link,
},
follow=True,
)
self.assertEqual(resp.status_code, 200)
self.assertIn('valid JS snippet link', resp.content)
reloaded = Service.objects.get(id=self.service.id)
# Still the same
self.assertEqual(reloaded.hackpad_id, snippet_link)
def test_checks_report(self):
form = StatusCheckReportForm({
'service': self.service.id,
'checks': [self.http_check.id],
'date_from': date.today() - timedelta(days=1),
'date_to': date.today(),
})
self.assertTrue(form.is_valid())
checks = form.get_report()
self.assertEqual(len(checks), 1)
check = checks[0]
self.assertEqual(len(check.problems), 1)
self.assertEqual(check.success_rate, 50)
def test_services_list(self):
"""test the services list queryset, since it uses some custom SQL for the active/inactive check counts"""
# add a disabled check
inactive_check = HttpStatusCheck(active=False)
inactive_check.save()
self.service.status_checks.add(inactive_check)
self.service.save()
qs = ServiceListView().get_queryset().all()
self.assertEquals(len(qs), 1)
service = qs[0]
# check the extra fields are correct
self.assertEquals(service.active_checks_count, 3)
self.assertEquals(service.inactive_checks_count, 1)
class TestWebConcurrency(APITransactionTestCase):
def setUp(self):
self.http_check = HttpStatusCheck.objects.create(
id=10102,
name='Http Check',
importance=Service.CRITICAL_STATUS,
endpoint='http://arachnys.com',
timeout=10,
status_code='200',
text_match=None,
)
self.service = Service.objects.create(
id=2194, name='Service',
)
self.service.save()
self.service.status_checks.add(self.http_check)
self.client = Client()
def test_delete_service(self):
# first, generate a lot of snapshots so the on service delete cascade will take time
ServiceStatusSnapshot.objects.bulk_create([
ServiceStatusSnapshot(service=self.service, time=timezone.now()) for _ in range(30000)
])
exceptions = []
def delete_service():
try:
Service.objects.get(pk=self.service.pk).delete()
            except Exception as e:
exceptions.append(e)
# note exceptions raised in another thread won't cause the main thread to fail the test
# but we raise anyway so the exception gets printed to stderr
raise
finally:
# manually close DB connection, otherwise it stays open and tests can't terminate cleanly
connection.close()
# start deleting
delete_thread = threading.Thread(target=delete_service)
delete_thread.start()
# simultaneously keep creating snapshots in the background
try:
for _ in range(100):
self.service.update_status()
except IntegrityError:
# this happens if the delete succeeded (service no longer exists) and is expected
pass
delete_thread.join()
# check if delete_service failed
self.assertEquals(len(exceptions), 0)
self.assertFalse(Service.objects.filter(pk=self.service.pk).exists())
|
server.py
|
import socket ## used for connecting users together ##
import threading ## used to manage each user in a separate thread ##
import pickle ## used to transfer data across the internet, similar to JSON ##
from chess.layoutBoardObject import Board ## used to get Board class to get an object and save each game in a list ##
# used for server side of chess
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDRESS = (SERVER, PORT)
FORMAT = "utf-8"
DISCONNECT_MESSAGE = "!DISCONNECT"
totalConn = 0
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind(ADDRESS)
except socket.error as e:
    print(f"[ERROR] could not bind to {ADDRESS}: {e}")
# This will store all games, first term = game number, second = actual board
allChessGames = {}
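# The handlers below send pickled objects with bare conn.send/conn.recv and fixed buffer
# sizes (e.g. conn.recv(942)), which can break if a message is split across TCP packets.
# A minimal sketch of length-prefixed framing using the HEADER constant defined above;
# these helpers are illustrative and are not wired into handle_client.
def recv_exact(conn, n):
    data = b""
    while len(data) < n:
        chunk = conn.recv(n - len(data))
        if not chunk:
            return b""  # peer closed the connection
        data += chunk
    return data

def send_framed(conn, payload_bytes):
    header = str(len(payload_bytes)).encode(FORMAT)
    header += b" " * (HEADER - len(header))  # pad the length header to a fixed width
    conn.sendall(header + payload_bytes)

def recv_framed(conn):
    header = recv_exact(conn, HEADER)
    if not header:
        return b""
    return recv_exact(conn, int(header.decode(FORMAT).strip()))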
def handle_client(conn, address, chessGame, gameNumber):
global totalConn
print(f"NEW CONNECTION {address}")
    # assign a colour: players on even connection counts play white, odd play black
colourId = "b"
if totalConn % 2 == 0:
colourId = "w"
# send what colour the player is
conn.send(colourId.encode(FORMAT))
# send board across socket by using pickle to "dump" it
boardString = pickle.dumps(chessGame)
conn.send(boardString)
totalConn += 1
connected = True
while connected:
d = conn.recv(942)
try:
data = d.decode("utf-8")
        except UnicodeDecodeError:
            # not a utf-8 command (probably a pickled payload) - log it and skip this message
            print(f"Bytes data = {d}")
            print(f"Length of data = {len(d)}")
            print(pickle.loads(d))
            data = ""
if not d:
break
if data == "":
continue
if data != "GetBoardPosition":
print(f"[DATA] = {data}")
if "Move" in data:
# for moving pieces
# "Move row column mousePos[0] mousePos[1]"
fullData = data.split(" ")
prevRow = int(fullData[1])
prevCol = int(fullData[2])
mousePosOne = int(fullData[3])
mousePosTwo = int(fullData[4])
mousePos = (mousePosOne, mousePosTwo)
print(fullData, mousePosOne // 70, (mousePosTwo - 110) // 70)
playerMoved = chessGame.movePossible(mousePos, prevCol, prevRow)
playerMoved = str(playerMoved)
conn.sendall(playerMoved.encode(FORMAT))
elif "GetPossibleMoves" in data:
# return the possible moves
possibleMoves = chessGame.possible
possibleMoves = pickle.dumps(possibleMoves)
conn.sendall(possibleMoves)
elif "GetBoardObject" in data:
# return the current board object
data = pickle.dumps(chessGame)
conn.sendall(data)
elif "SetPossible" in data:
# send message to confirm
conn.send("y".encode(FORMAT))
# Set the possible moves
print("[RECEIVED] Set Possible received")
possibleMoves = conn.recv(1024)
possibleMoves = pickle.loads(possibleMoves)
print(possibleMoves)
chessGame.possible = possibleMoves
elif "GetBoardPosition" in data:
# Return current board position
boardPosition = chessGame.board
boardPosition = pickle.dumps(boardPosition)
conn.sendall(boardPosition)
elif "CheckOtherPlayer" in data:
# Return if the player with the black pieces is in the game
# check if the total number of connections are even or odd
otherPlayer = False
if totalConn % 2 == 0:
otherPlayer = True
            conn.send(str(otherPlayer).encode(FORMAT))
totalConn -= 1
conn.close()
def start():
global totalConn
server.listen()
print("[LISTENING] on " + SERVER)
while True:
conn, address = server.accept()
print("[CONNECTED] PLAYER JOINED")
totalConn = threading.activeCount() - 1
# check if there is another game with only one player
if totalConn % 2 == 0:
# if not, add a new chess game to all chess games dictionary
chessGame = Board()
allChessGames[totalConn] = chessGame
else:
# chess game is the newest on the dictionary
chessGame = allChessGames[totalConn - 1]
thread = threading.Thread(target=handle_client, args=(conn, address, chessGame, totalConn))
thread.start()
print(f" \nACTIVE CONNECTIONS: {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
|
main.py
|
import requests
import json
import numpy as np
import pandas as pd
import time
import datetime
import talib
import oandapyV20
from oandapyV20.contrib.requests import MarketOrderRequest
from oandapyV20.contrib.requests import TakeProfitDetails, StopLossDetails
import oandapyV20.endpoints.orders as orders
import oandapyV20.endpoints.positions as positions
import oandapyV20.endpoints.pricing as pricing
from oandapyV20 import API
import oandapyV20.endpoints.instruments as instruments
from multiprocessing import Process
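# Hypothetical helper (not called by EURTRADE below) sketching how take-profit/stop-loss
# levels relate to the entry price: a long takes profit above the entry and stops out below
# it, while a short does the opposite. The default offsets mirror the multipliers used below.
def tp_sl_for(entry_price, side, tp_pct=0.0058, sl_pct=0.0012):
    if side == "long":
        return round(entry_price * (1 + tp_pct), 6), round(entry_price * (1 - sl_pct), 6)
    return round(entry_price * (1 - tp_pct), 6), round(entry_price * (1 + sl_pct), 6)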
def EURTRADE():
ACCESS_TOKEN = "YOUR OANDA API TOKEN"
ACCOUNT_ID = "YOUR ACCOUNT ID"
api = oandapyV20.API(access_token=ACCESS_TOKEN)
close_prices = []
open_prices = []
low_prices = []
high_prices = []
volume_numbers = []
list_of_time = []
candleopen = []
candleclose = []
candlehigh = []
candlelow = []
candlevolume = []
list_shortsl = []
list_longsl = []
quantity = 7500
long_regulation_trade = [quantity, quantity + 500, quantity + 1000, quantity + 2000, quantity + 4000]
long_regulation_number = 0
short_regulation_trade = [quantity, quantity + 500, quantity + 1000, quantity + 2000, quantity + 4000]
short_regulation_number = 0
while True:
params = {
"count": 7,
"granularity": "M2"
}
instrum = instruments.InstrumentsCandles(instrument="EUR_USD", params=params)
json_response = api.request(instrum)
for candlenum in range(len(json_response["candles"])):
openprice = float(json_response["candles"][candlenum]['mid']['o'])
closeprice = float(json_response["candles"][candlenum]['mid']['c'])
highprice = float(json_response["candles"][candlenum]['mid']['h'])
lowprice = float(json_response["candles"][candlenum]['mid']['l'])
volume = float(json_response["candles"][candlenum]['volume'])
timestamped = (json_response["candles"][candlenum]['time'])
if timestamped not in list_of_time:
list_of_time.append(timestamped)
if len(candleopen) >= 1:
close_prices.append(candleclose[-1])
open_prices.append(candleopen[-1])
low_prices.append(candlelow[-1])
high_prices.append(candlehigh[-1])
volume_numbers.append(candlevolume[-1])
#reset candles
candleopen = []
candleclose = []
candlehigh = []
candlelow = []
candlevolume = []
if len(close_prices) > 7:
#Pricing info- bid and ask
query = {"instruments": "EUR_USD"}
pricingrequest = pricing.PricingInfo(accountID=ACCOUNT_ID, params=query)
receivedrequest = api.request(pricingrequest)
bidprice = pricingrequest.response['prices'][0]['bids'][0]['price']
askprice = pricingrequest.response['prices'][0]['asks'][0]['price']
shorttp = round(float(bidprice) * 0.9942, 6)  # short take-profit sits below the entry price
shortsl = round(float(askprice) * 1.0012, 6)  # short stop-loss sits above the entry price
longtp = round(float(bidprice) * 1.0058, 6)   # long take-profit sits above the entry price
longsl = round(float(askprice) * 0.9988, 6)   # long stop-loss sits below the entry price
#Account info- open positions
account_details = positions.OpenPositions(accountID=ACCOUNT_ID)
api.request(account_details)
#Market order creation- short position
shortmktOrder = MarketOrderRequest(
instrument="EUR_USD",
units= - (short_regulation_trade[short_regulation_number]),
takeProfitOnFill=TakeProfitDetails(price=shorttp).data,
stopLossOnFill=StopLossDetails(price=shortsl).data)
shortordercreation = orders.OrderCreate(ACCOUNT_ID, data=shortmktOrder.data)
#Market order creation- long position
longmktOrder = MarketOrderRequest(
instrument="EUR_USD",
units= (long_regulation_trade[long_regulation_number]),
takeProfitOnFill=TakeProfitDetails(price=longtp).data,
stopLossOnFill=StopLossDetails(price=longsl).data)
longordercreation = orders.OrderCreate(ACCOUNT_ID, data=longmktOrder.data)
numclose = np.array(close_prices)
numopen = np.array(open_prices)
numlow = np.array(low_prices)
numhigh = np.array(high_prices)
numvolume = np.array(volume_numbers)
rsii = talib.RSI(numclose, 7)
mfi = talib.MFI(numhigh, numlow, numclose, numvolume, 7)
atr = talib.ATR(numhigh, numlow, numclose, 7)
print("EUR_USD: The current MFI is {}.".format(mfi[-1]))
print("EUR_USD: The current RSI is {}.".format(rsii[-1]))
print("EUR_USD: The current ATR is {}.".format(atr[-1]))
print("------------------------------------------------------------------------------------------------------------------------")
if float(bidprice) / float(askprice) >= 0.99984:
if short_regulation_number >= len(short_regulation_trade) - 1:  # stop scaling in at the last ladder entry; the original == 6 check allowed an IndexError
pass
else:
if rsii[-1] >= 80 and rsii[-2] >= 80 and rsii[-3] >= 80:
list_shortsl.append(shortsl)
try:
# create the OrderCreate request
rv = api.request(shortordercreation)
except oandapyV20.exceptions.V20Error as err:
print(shortordercreation.status_code, err)
else:
print('EUR_USD: Short order for {} units created at {}'.format(short_regulation_trade[short_regulation_number], askprice))
short_regulation_number += 1
if long_regulation_number >= len(long_regulation_trade) - 1:  # stop scaling in at the last ladder entry; the original == 6 check allowed an IndexError
pass
else:
if rsii[-1] <= 20 and rsii[-2] <= 20 and rsii[-3] <= 20:
list_longsl.append(longsl)
try:
# create the OrderCreate request
rv = api.request(longordercreation)
except oandapyV20.exceptions.V20Error as err:
print(longordercreation.status_code, err)
else:
print('EUR_USD: Long for {} units created at {}'.format(long_regulation_trade[long_regulation_number], bidprice))
long_regulation_number += 1
else:
print("Bid/ask too wide for entry.")
if mfi[-1] <= 5:
for i in range(len(account_details.response['positions'])):
try:
if account_details.response['positions'][i]['instrument'] == 'EUR_USD':
if float(account_details.response['positions'][i]['short']['units']) < 0:
units_available = int(account_details.response['positions'][i]['short']['units'])
mktOrder = MarketOrderRequest(
instrument="EUR_USD",
units = -(units_available),
)
r = orders.OrderCreate(ACCOUNT_ID, data=mktOrder.data)  # submit the covering order, not the original long entry order
try:
# create the OrderCreate request
rv = api.request(r)
except oandapyV20.exceptions.V20Error as err:
print(r.status_code, err)
else:
print('EUR_USD: Cover short for {} units created at {}'.format(units_available, askprice))
short_regulation_number = 0
except IndexError:
pass
except KeyError:
pass
if mfi[-1] >= 95:
for i in range(len(account_details.response['positions'])):
try:
if account_details.response['positions'][i]['instrument'] == 'EUR_USD':
if float(account_details.response['positions'][i]['long']['units']) > 0:
units_available = int(account_details.response['positions'][i]['long']['units'])
mktOrder = MarketOrderRequest(
instrument="EUR_USD",
units= -(units_available),
)
r = orders.OrderCreate(ACCOUNT_ID, data=mktOrder.data)
try:
# create the OrderCreate request
rv = api.request(r)
except oandapyV20.exceptions.V20Error as err:
print(r.status_code, err)
else:
print('EUR_USD: Selling off {} long units created at {}'.format(units_available, bidprice))
long_regulation_number = 0
except IndexError:
pass
except KeyError:
pass
else:
candleopen.append(openprice)
candleclose.append(closeprice)
candlelow.append(lowprice)
candlehigh.append(highprice)
candlevolume.append(volume)
def CAD():
ACCESS_TOKEN = "YOUR OANDA API TOKEN"
ACCOUNT_ID = "YOUR ACCOUNT ID"
api = oandapyV20.API(access_token=ACCESS_TOKEN)
close_prices = []
open_prices = []
low_prices = []
high_prices = []
volume_numbers = []
list_of_time = []
candleopen = []
candleclose = []
candlehigh = []
candlelow = []
candlevolume = []
list_shortsl = []
list_longsl = []
quantity = 7500
long_regulation_trade = [quantity, quantity + 500, quantity + 1000, quantity + 2000, quantity + 4000]
long_regulation_number = 0
short_regulation_trade = [quantity, quantity + 500, quantity + 1000, quantity + 2000, quantity + 4000]
short_regulation_number = 0
while True:
params = {
"count": 7,
"granularity": "M2"
}
instrum = instruments.InstrumentsCandles(instrument="USD_CAD", params=params)
json_response = api.request(instrum)
for candlenum in range(len(json_response["candles"])):
openprice = float(json_response["candles"][candlenum]['mid']['o'])
closeprice = float(json_response["candles"][candlenum]['mid']['c'])
highprice = float(json_response["candles"][candlenum]['mid']['h'])
lowprice = float(json_response["candles"][candlenum]['mid']['l'])
volume = float(json_response["candles"][candlenum]['volume'])
timestamped = (json_response["candles"][candlenum]['time'])
if timestamped not in list_of_time:
list_of_time.append(timestamped)
if len(candleopen) >= 1:
close_prices.append(candleclose[-1])
open_prices.append(candleopen[-1])
low_prices.append(candlelow[-1])
high_prices.append(candlehigh[-1])
volume_numbers.append(candlevolume[-1])
#reset candles
candleopen = []
candleclose = []
candlehigh = []
candlelow = []
candlevolume = []
if len(close_prices) > 7:
#Pricing info- bid and ask
query = {"instruments": "USD_CAD"}
pricingrequest = pricing.PricingInfo(accountID=ACCOUNT_ID, params=query)
receivedrequest = api.request(pricingrequest)
bidprice = pricingrequest.response['prices'][0]['bids'][0]['price']
askprice = pricingrequest.response['prices'][0]['asks'][0]['price']
shorttp = round(float(bidprice) * 0.9942, 6)  # short take-profit sits below the entry price
shortsl = round(float(askprice) * 1.0012, 6)  # short stop-loss sits above the entry price
longtp = round(float(bidprice) * 1.0058, 6)   # long take-profit sits above the entry price
longsl = round(float(askprice) * 0.9988, 6)   # long stop-loss sits below the entry price
#Account info- open positions
account_details = positions.OpenPositions(accountID=ACCOUNT_ID)
api.request(account_details)
#Market order creation- short position
shortmktOrder = MarketOrderRequest(
instrument="USD_CAD",
units= - (short_regulation_trade[short_regulation_number]),
takeProfitOnFill=TakeProfitDetails(price=shorttp).data,
stopLossOnFill=StopLossDetails(price=shortsl).data)
shortordercreation = orders.OrderCreate(ACCOUNT_ID, data=shortmktOrder.data)
#Market order creation- long position
longmktOrder = MarketOrderRequest(
instrument="USD_CAD",
units= (long_regulation_trade[long_regulation_number]),
takeProfitOnFill=TakeProfitDetails(price=longtp).data,
stopLossOnFill=StopLossDetails(price=longsl).data)
longordercreation = orders.OrderCreate(ACCOUNT_ID, data=longmktOrder.data)
numclose = np.array(close_prices)
numopen = np.array(open_prices)
numlow = np.array(low_prices)
numhigh = np.array(high_prices)
numvolume = np.array(volume_numbers)
rsii = talib.RSI(numclose, 7)
mfi = talib.MFI(numhigh, numlow, numclose, numvolume, 7)
atr = talib.ATR(numhigh, numlow, numclose, 7)
print("USD_CAD: The current MFI is {}.".format(mfi[-1]))
print("USD_CAD: The current RSI is {}.".format(rsii[-1]))
print("USD_CAD: The current ATR is {}.".format(atr[-1]))
print("------------------------------------------------------------------------------------------------------------------------")
if float(bidprice) / float(askprice) >= 0.99985:
if short_regulation_number >= len(short_regulation_trade) - 1:  # stop scaling in at the last ladder entry; the original == 6 check allowed an IndexError
pass
else:
if rsii[-1] >= 75 and rsii[-2] >= 75 and rsii[-3] >= 75 and atr[-1] <= 0.00035:
list_shortsl.append(shortsl)
try:
# create the OrderCreate request
rv = api.request(shortordercreation)
except oandapyV20.exceptions.V20Error as err:
print(shortordercreation.status_code, err)
else:
print('USD_CAD: Short order for {} units created at {}'.format(short_regulation_trade[short_regulation_number], askprice))
short_regulation_number += 1
if long_regulation_number >= len(long_regulation_trade) - 1:  # stop scaling in at the last ladder entry; the original == 6 check allowed an IndexError
pass
else:
if rsii[-1] <= 25 and rsii[-2] <= 25 and rsii[-3] <= 25 and atr[-1] <= 0.00035:
list_longsl.append(longsl)
try:
# create the OrderCreate request
rv = api.request(longordercreation)
except oandapyV20.exceptions.V20Error as err:
print(longordercreation.status_code, err)
else:
print('USD_CAD: Long for {} units created at {}'.format(long_regulation_trade[long_regulation_number], bidprice))
long_regulation_number += 1
else:
print("Bid/ask too wide for entry.")
if mfi[-1] <= 10:
for i in range(len(account_details.response['positions'])):
try:
if account_details.response['positions'][i]['instrument'] == 'USD_CAD':
if float(account_details.response['positions'][i]['short']['units']) < 0:
units_available = int(account_details.response['positions'][i]['short']['units'])
mktOrder = MarketOrderRequest(
instrument="USD_CAD",
units = -(units_available),
)
r = orders.OrderCreate(ACCOUNT_ID, data=mktOrder.data)  # submit the covering order, not the original long entry order
try:
# create the OrderCreate request
rv = api.request(r)
except oandapyV20.exceptions.V20Error as err:
print(r.status_code, err)
else:
print('USD_CAD: Cover short for {} units created at {}'.format(units_available, askprice))
short_regulation_number = 0
except IndexError:
pass
except KeyError:
pass
if mfi[-1] >= 90:
for i in range(len(account_details.response['positions'])):
try:
if account_details.response['positions'][i]['instrument'] == 'USD_CAD':
if float(account_details.response['positions'][i]['long']['units']) > 0:
units_available = int(account_details.response['positions'][i]['long']['units'])
mktOrder = MarketOrderRequest(
instrument="USD_CAD",
units= -(units_available),
)
r = orders.OrderCreate(ACCOUNT_ID, data=mktOrder.data)
try:
# create the OrderCreate request
rv = api.request(r)
except oandapyV20.exceptions.V20Error as err:
print(r.status_code, err)
else:
print('USD_CAD: Selling off {} long units created at {}'.format(units_available, bidprice))
long_regulation_number = 0
except IndexError:
pass
except KeyError:
pass
else:
candleopen.append(openprice)
candleclose.append(closeprice)
candlelow.append(lowprice)
candlehigh.append(highprice)
candlevolume.append(volume)
if __name__=='__main__':
p1 = Process(target = EURTRADE)
p2 = Process(target = CAD)
p1.start()
p2.start()
p1.join()
p2.join()
|
QSubprocessor.py
|
import multiprocessing
import sys
import time
import traceback
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from .qtex import *
class QSubprocessor(object):
"""
"""
class Cli(object):
def __init__ ( self, client_dict ):
s2c = multiprocessing.Queue()
c2s = multiprocessing.Queue()
self.p = multiprocessing.Process(target=self._subprocess_run, args=(client_dict,s2c,c2s) )
self.s2c = s2c
self.c2s = c2s
self.p.daemon = True
self.p.start()
self.state = None
self.sent_time = None
self.sent_data = None
self.name = None
self.host_dict = None
def kill(self):
self.p.terminate()
self.p.join()
#overridable optional
def on_initialize(self, client_dict):
#initialize your subprocess here using client_dict
pass
#overridable optional
def on_finalize(self):
#finalize your subprocess here
pass
#overridable
def process_data(self, data):
#process 'data' given from host and return result
raise NotImplementedError
#overridable optional
def get_data_name (self, data):
#return a string identifier for your 'data'
return "undefined"
def log_info(self, msg): self.c2s.put ( {'op': 'log_info', 'msg':msg } )
def log_err(self, msg): self.c2s.put ( {'op': 'log_err' , 'msg':msg } )
def progress_bar_inc(self, c): self.c2s.put ( {'op': 'progress_bar_inc' , 'c':c } )
def _subprocess_run(self, client_dict, s2c, c2s):
self.c2s = c2s
data = None
try:
self.on_initialize(client_dict)
c2s.put ( {'op': 'init_ok'} )
while True:
msg = s2c.get()
op = msg.get('op','')
if op == 'data':
data = msg['data']
result = self.process_data (data)
c2s.put ( {'op': 'success', 'data' : data, 'result' : result} )
data = None
elif op == 'close':
break
time.sleep(0.001)
self.on_finalize()
c2s.put ( {'op': 'finalized'} )
except Exception as e:
c2s.put ( {'op': 'error', 'data' : data} )
if data is not None:
print ('Exception while process data [%s]: %s' % (self.get_data_name(data), traceback.format_exc()) )
else:
print ('Exception: %s' % (traceback.format_exc()) )
c2s.close()
s2c.close()
self.c2s = None
# disable pickling
def __getstate__(self):
return dict()
def __setstate__(self, d):
self.__dict__.update(d)
#overridable
def __init__(self, name, SubprocessorCli_class, no_response_time_sec = 0, io_loop_sleep_time=0.005):
if not issubclass(SubprocessorCli_class, QSubprocessor.Cli):
raise ValueError("SubprocessorCli_class must be subclass of QSubprocessor.Cli")
self.name = name
self.SubprocessorCli_class = SubprocessorCli_class
self.no_response_time_sec = no_response_time_sec
self.io_loop_sleep_time = io_loop_sleep_time
self.clis = []
#get the name, host dict and client dict for each subprocess, then spawn it
for name, host_dict, client_dict in self.process_info_generator():
try:
cli = self.SubprocessorCli_class(client_dict)
cli.state = 1
cli.sent_time = 0
cli.sent_data = None
cli.name = name
cli.host_dict = host_dict
self.clis.append (cli)
except:
raise Exception (f"Unable to start subprocess {name}. Error: {traceback.format_exc()}")
if len(self.clis) == 0:
raise Exception ("Unable to start QSubprocessor '%s' " % (self.name))
#wait for the subprocesses to report whether they initialized successfully
while True:
for cli in self.clis[:]:
while not cli.c2s.empty():
obj = cli.c2s.get()
op = obj.get('op','')
if op == 'init_ok':
cli.state = 0
elif op == 'log_info':
print(obj['msg'])
elif op == 'log_err':
print(obj['msg'])
elif op == 'error':
cli.kill()
self.clis.remove(cli)
break
if all ([cli.state == 0 for cli in self.clis]):
break
time.sleep(0.005)
if len(self.clis) == 0:
raise Exception ( "Unable to start subprocesses." )
#ok some processes survived, initialize host logic
self.on_clients_initialized()
self.q_timer = QTimer()
self.q_timer.timeout.connect(self.tick)
self.q_timer.start(5)
#overridable
def process_info_generator(self):
#yield per process (name, host_dict, client_dict)
for i in range(min(multiprocessing.cpu_count(), 8) ):
yield 'CPU%d' % (i), {}, {}
#overridable optional
def on_clients_initialized(self):
#logic when all subprocesses initialized and ready
pass
#overridable optional
def on_clients_finalized(self):
#logic when all subprocess finalized
pass
#overridable
def get_data(self, host_dict):
#return data for processing here
raise NotImplementedError
#overridable
def on_data_return (self, host_dict, data):
#you have to place returned 'data' back to your queue
raise NotImplementedError
#overridable
def on_result (self, host_dict, data, result):
#your logic what to do with 'result' of 'data'
raise NotImplementedError
def tick(self):
for cli in self.clis[:]:
while not cli.c2s.empty():
obj = cli.c2s.get()
op = obj.get('op','')
if op == 'success':
#success processed data, return data and result to on_result
self.on_result (cli.host_dict, obj['data'], obj['result'])
cli.sent_data = None  # clear the worker's in-flight payload (not an attribute on the host)
cli.state = 0
elif op == 'error':
#an error occurred while processing data; return the chunk to on_data_return
if 'data' in obj.keys():
self.on_data_return (cli.host_dict, obj['data'] )
#and killing process
cli.kill()
self.clis.remove(cli)
elif op == 'log_info':
print(obj['msg'])
elif op == 'log_err':
print(obj['msg'])
elif op == 'progress_bar_inc':
...
#io.progress_bar_inc(obj['c'])
for cli in self.clis[:]:
if cli.state == 1:
if cli.sent_time != 0 and self.no_response_time_sec != 0 and (time.time() - cli.sent_time) > self.no_response_time_sec:
#subprocess has been busy for too long
print ( "%s doesn't respond, terminating it." % (cli.name) )
self.on_data_return (cli.host_dict, cli.sent_data )
cli.kill()
self.clis.remove(cli)
for cli in self.clis[:]:
if cli.state == 0:
#free state of subprocess, get some data from get_data
data = self.get_data(cli.host_dict)
if data is not None:
#and send it to subprocess
cli.s2c.put ( {'op': 'data', 'data' : data} )
cli.sent_time = time.time()
cli.sent_data = data
cli.state = 1
if all ([cli.state == 0 for cli in self.clis]):
#gracefully terminating subprocesses
for cli in self.clis[:]:
cli.s2c.put ( {'op': 'close'} )
cli.sent_time = time.time()
while True:
for cli in self.clis[:]:
terminate_it = False
while not cli.c2s.empty():
obj = cli.c2s.get()
obj_op = obj['op']
if obj_op == 'finalized':
terminate_it = True
break
if (time.time() - cli.sent_time) > 30:
terminate_it = True
if terminate_it:
cli.state = 2
cli.kill()
if all ([cli.state == 2 for cli in self.clis]):
break
#finalizing host logic
self.q_timer.stop()
self.q_timer = None
self.on_clients_finalized()
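# --- Hedged usage sketch (not part of the original class) --------------------
# A minimal, hypothetical subclass showing how the overridable hooks are meant
# to fit together. SquareSubprocessor, its work list and the squaring task are
# illustrative assumptions, not part of this module; a QApplication must already
# be running for the QTimer tick loop, and on spawn-based platforms the usual
# `if __name__ == "__main__"` guard applies.
class SquareSubprocessor(QSubprocessor):
    class Cli(QSubprocessor.Cli):
        def process_data(self, data):
            # runs inside the worker process
            return data * data

    def __init__(self, numbers):
        self.todo = list(numbers)
        self.results = []
        super().__init__('Square', SquareSubprocessor.Cli)

    def get_data(self, host_dict):
        # hand the next pending item to a free worker; None means nothing is left
        return self.todo.pop(0) if self.todo else None

    def on_data_return(self, host_dict, data):
        # a worker died mid-task, so put the item back for another worker
        self.todo.insert(0, data)

    def on_result(self, host_dict, data, result):
        self.results.append((data, result))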
|
main_window.py
|
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import QThread
from lldbvis.debug import debugger
from lldbvis.events import signals
from lldbvis.events import dispatcher
from lldbvis.gui.widgets import CodeEditor
from lldbvis.settings import constants
class DebugThread(QThread):
def __init__(self, target, parent=None, *args):
QThread.__init__(self, parent=parent)
if not callable(target):
raise TypeError('Thread target must be a callable')
self.target = target
self.args = args
def run(self):
if len(self.args) > 0:
self.target(*self.args)
else:
self.target()
class RunArgumentsDialog(QDialog):
def __init__(self, parent=None, *args):
QDialog.__init__(self, parent, *args)  # initialize the QDialog base class, not just QWidget
self.setLayout(QFormLayout())
self.fileName = None
self.fileNameEdit = QLineEdit()
self.fileNameEdit.textChanged.connect(self._checkExecutable)
self.fileButton = QPushButton('Select File')
self.fileButton.clicked.connect(self.selectFile)
self.layout().addRow(self.fileButton, self.fileNameEdit)
self.errorLabel = QLabel()
self.errorLabel.setStyleSheet('color: red')
self.layout().addRow(self.errorLabel)
self.errorLabel.hide()
button_box = QDialogButtonBox(self)
button_box.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.okButton = button_box.button(QDialogButtonBox.Ok)
self.okButton.clicked.connect(lambda: [self.setFileName(), self.close()])
self.cancelButton = button_box.button(QDialogButtonBox.Cancel)
self.cancelButton.clicked.connect(lambda: [self.resetFileName(), self.close()])
self.layout().addRow(button_box)
self.setWindowTitle('Run Arguments')
self.resize(constants.RUN_ARGUMENTS_DIALOG_SIZE)
def _checkExecutable(self, file_path):
if self._isExecutable(file_path):
if not self.errorLabel.isHidden():
self.errorLabel.hide()
self.okButton.setEnabled(True)
else:
self.errorLabel.setText('File is not an executable!')
self.okButton.setEnabled(False)
self.errorLabel.show()
def _isExecutable(self, file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
def setFileName(self):
self.fileName = self.fileNameEdit.text()
def resetFileName(self):
self.fileName = None
def selectFile(self):
file_dialog = QFileDialog()
file_name = file_dialog.getOpenFileName(self, 'Select File')
if file_name is not None and os.path.isfile(file_name):
self.fileNameEdit.setText(file_name)
def getFileName(self):
return str(self.fileName)
class VisWindow(QMainWindow):
def __init__(self, *argv):
QMainWindow.__init__(self, *argv)
self.debugThread = None
self.targetFile = None
self.workingDir = None
self.editor = CodeEditor()
self.runAction = None
self.runMenu = None
self.createMenuBar()
self.runArgumentsDialog = RunArgumentsDialog()
dispatcher.connect(lambda *args, **kwargs: self.runMenu.setEnabled(True), signal=signals.EndDebugger)
dispatcher.connect(self.openDeclaration, signal=signals.OpenDeclaration)
def createMenuBar(self):
mb = self.menuBar()
self.runAction = QAction('Run', self)
self.runAction.setDisabled(self.targetFile is None)
self.runAction.setShortcut('Shift+F10')
self.runAction.triggered.connect(self.runDebugger)
run_config_action = QAction('Set Run Arguments...', self)
run_config_action.triggered.connect(lambda: [self.setRunArguments(), self.enableRunAction()])
open_editor_action = QAction('Open Editor', self)
open_editor_action.triggered.connect(self.openEditor)
self.runMenu = mb.addMenu('Run')
self.runMenu.addAction(self.runAction)
self.runMenu.addAction(run_config_action)
editor_menu = mb.addMenu('Editor')
editor_menu.addAction(open_editor_action)
def openDeclaration(self, file_name, line):
self.openEditor()
self.editor.tabbed_editor.openFile(file_name)
try:
self.editor.tabbed_editor.currentWidget().scrollToLine(line)
except Exception:
pass
def openEditor(self):
if self.editor.isHidden():
self.editor.show()
else:
self.editor.raise_()
self.editor.activateWindow()
def runDebugger(self):
debugger.setup(self.workingDir, self.targetFile)
self.debugThread = DebugThread(parent=self, target=debugger.start)
self.debugThread.setTerminationEnabled(True)
self.debugThread.start()
self.runMenu.setEnabled(False)
def enableRunAction(self, *args, **kwargs):
if self.targetFile is not None:
self.runAction.setEnabled(True)
self.runAction.setText("Run '" + str(self.targetFile) + "'")
def setRunArguments(self):
self.runArgumentsDialog.exec_()
file_path = self.runArgumentsDialog.getFileName()
if file_path is not None and os.path.isfile(file_path):
file_name = os.path.basename(file_path)
self.workingDir = file_path.replace(file_name, '')
self.targetFile = file_name
|
training_scheduler.py
|
import torch
import threading
import subprocess
import multiprocessing as mp
import os
pruned_model_path="./pruned_models/vgg19_bn/"
retrained_model_path="./retrained_model/vgg19_bn/"
'''
1) initialize bounded producer/consumer queue of size max(num_devices (param), output from torch.cuda.device_count())
'''
def train(executables, allowable_devices=range(torch.cuda.device_count())):
free_devices = mp.Queue(maxsize=len(allowable_devices))
for i in allowable_devices:
free_devices.put(i)
for executable in executables:
assigned_device = free_devices.get()
print("script: '" + str(executable) + "' assigned to GPU: " + str(assigned_device))
mp.Process(target=execute_on_device, args=(assigned_device, executable, free_devices)).start()
def execute_on_device(GPU_ID, executable, free_devices):
# train the model
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_ID)
executable_tokens = executable.split(" ")
stdout_file = None
if ">" in executable_tokens:
idx = executable_tokens.index(">")
stdout_file = open(executable_tokens[idx+1], "w")
executable_tokens = executable_tokens[:idx]
print(stdout_file)
subprocess.run(executable_tokens, stdout=stdout_file)
# mark this GPU as free
free_devices.put(GPU_ID)
if stdout_file is not None:
stdout_file.close()
def get_stdout(executable_tokens):
    if '>' in executable_tokens:
        # return the redirect target that follows '>'
        return executable_tokens[executable_tokens.index('>') + 1]
    else:
        return None
if __name__ == '__main__':
to_train = [
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_0_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_0_pruned_model --train-batch 64 --class-index 0",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_1_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_1_pruned_model --train-batch 64 --class-index 1",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_2_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_2_pruned_model --train-batch 64 --class-index 2",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_3_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_3_pruned_model --train-batch 64 --class-index 3",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_4_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_4_pruned_model --train-batch 64 --class-index 4",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_5_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_5_pruned_model --train-batch 64 --class-index 5",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_6_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_6_pruned_model --train-batch 64 --class-index 6",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_7_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_7_pruned_model --train-batch 64 --class-index 7",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_8_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_8_pruned_model --train-batch 64 --class-index 8",
"python3 cifar_binary.py --pruned -a vgg19_bn --lr 0.01 --epochs 40 --schedule 20 30 --gamma 0.1 --resume "+pruned_model_path+"vgg19_bn_9_pruned_model.pth --checkpoint "+retrained_model_path+"vgg19_bn_9_pruned_model --train-batch 64 --class-index 9",
]
train(to_train)
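# Hedged note (illustrative, not part of the original script): the scheduler can
# also be pinned to an explicit subset of GPUs by passing their IDs, e.g.
#   train(to_train, allowable_devices=[0, 1])   # hypothetical device IDs
# Each script then runs with CUDA_VISIBLE_DEVICES set to the GPU it was handed.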
|
test_client_http.py
|
import contextlib
import select
import socket
import threading
from contextlib import contextmanager
import botocore.session
from botocore.config import Config
from botocore.exceptions import (
ClientError,
ConnectionClosedError,
ConnectTimeoutError,
EndpointConnectionError,
ProxyConnectionError,
ReadTimeoutError,
)
from botocore.vendored.requests import exceptions as requests_exceptions
from botocore.vendored.six.moves import BaseHTTPServer, socketserver
from tests import mock, unittest
class TestClientHTTPBehavior(unittest.TestCase):
def setUp(self):
self.port = unused_port()
self.localhost = 'http://localhost:%s/' % self.port
self.session = botocore.session.get_session()
# We need to set fake credentials to ensure credentials aren't searched
# for which might make additional API calls (assume role, etc).
self.session.set_credentials('fakeakid', 'fakesecret')
@unittest.skip('Test has suddenly become extremely flakey.')
def test_can_proxy_https_request_with_auth(self):
proxy_url = 'http://user:pass@localhost:%s/' % self.port
config = Config(proxies={'https': proxy_url}, region_name='us-west-1')
client = self.session.create_client('ec2', config=config)
class AuthProxyHandler(ProxyHandler):
event = threading.Event()
def validate_auth(self):
proxy_auth = self.headers.get('Proxy-Authorization')
return proxy_auth == 'Basic dXNlcjpwYXNz'
try:
with background(run_server, args=(AuthProxyHandler, self.port)):
AuthProxyHandler.event.wait(timeout=60)
client.describe_regions()
except BackgroundTaskFailed:
self.fail('Background task did not exit, proxy was not used.')
@unittest.skip('Proxy cannot connect to service when run in CodeBuild.')
def test_proxy_request_includes_host_header(self):
proxy_url = 'http://user:pass@localhost:%s/' % self.port
config = Config(
proxies={'https': proxy_url},
proxies_config={'proxy_use_forwarding_for_https': True},
region_name='us-west-1'
)
environ = {'BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER': "True"}
self.environ_patch = mock.patch('os.environ', environ)
self.environ_patch.start()
client = self.session.create_client('ec2', config=config)
class ConnectProxyHandler(ProxyHandler):
event = threading.Event()
def do_CONNECT(self):
remote_host, remote_port = self.path.split(':')
# Ensure we're sending the correct host header in CONNECT
if self.headers.get('host') != remote_host:
self.send_response(400)
self.end_headers()
return
self.send_response(200)
self.end_headers()
remote_host, remote_port = self.path.split(':')
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host, int(remote_port)))
self._tunnel(self.request, remote_socket)
remote_socket.close()
try:
with background(run_server, args=(ConnectProxyHandler, self.port)):
ConnectProxyHandler.event.wait(timeout=60)
client.describe_regions()
except BackgroundTaskFailed:
self.fail('Background task did not exit, proxy was not used.')
except ProxyConnectionError:
self.fail('Proxy CONNECT failed, unable to establish connection.')
except ClientError as e:
# Fake credentials won't resolve against service
# but we've successfully contacted through the proxy
assert e.response['Error']['Code'] == 'AuthFailure'
finally:
self.environ_patch.stop()
def _read_timeout_server(self):
config = Config(
read_timeout=0.1,
retries={'max_attempts': 0},
region_name='us-weast-2',
)
client = self.session.create_client('ec2', endpoint_url=self.localhost,
config=config)
client_call_ended_event = threading.Event()
class FakeEC2(SimpleHandler):
event = threading.Event()
msg = b'<response/>'
def get_length(self):
return len(self.msg)
def get_body(self):
client_call_ended_event.wait(timeout=60)
return self.msg
try:
with background(run_server, args=(FakeEC2, self.port)):
try:
FakeEC2.event.wait(timeout=60)
client.describe_regions()
finally:
client_call_ended_event.set()
except BackgroundTaskFailed:
self.fail('Fake EC2 service was not called.')
def test_read_timeout_exception(self):
with self.assertRaises(ReadTimeoutError):
self._read_timeout_server()
def test_old_read_timeout_exception(self):
with self.assertRaises(requests_exceptions.ReadTimeout):
self._read_timeout_server()
@unittest.skip('The current implementation will fail to timeout on linux')
def test_connect_timeout_exception(self):
config = Config(
connect_timeout=0.2,
retries={'max_attempts': 0},
region_name='us-weast-2',
)
client = self.session.create_client('ec2', endpoint_url=self.localhost,
config=config)
server_bound_event = threading.Event()
client_call_ended_event = threading.Event()
def no_accept_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', self.port))
server_bound_event.set()
client_call_ended_event.wait(timeout=60)
sock.close()
with background(no_accept_server):
server_bound_event.wait(timeout=60)
with self.assertRaises(ConnectTimeoutError):
client.describe_regions()
client_call_ended_event.set()
def test_invalid_host_gaierror(self):
config = Config(retries={'max_attempts': 0}, region_name='us-weast-1')
endpoint = 'https://ec2.us-weast-1.amazonaws.com/'
client = self.session.create_client('ec2', endpoint_url=endpoint,
config=config)
with self.assertRaises(EndpointConnectionError):
client.describe_regions()
def test_bad_status_line(self):
config = Config(retries={'max_attempts': 0}, region_name='us-weast-2')
client = self.session.create_client('ec2', endpoint_url=self.localhost,
config=config)
class BadStatusHandler(BaseHTTPServer.BaseHTTPRequestHandler):
event = threading.Event()
def do_POST(self):
self.wfile.write(b'garbage')
with background(run_server, args=(BadStatusHandler, self.port)):
with self.assertRaises(ConnectionClosedError):
BadStatusHandler.event.wait(timeout=60)
client.describe_regions()
def unused_port():
with contextlib.closing(socket.socket()) as sock:
sock.bind(('127.0.0.1', 0))
return sock.getsockname()[1]
class SimpleHandler(BaseHTTPServer.BaseHTTPRequestHandler):
status = 200
def get_length(self):
return 0
def get_body(self):
return b''
def do_GET(self):
length = str(self.get_length())
self.send_response(self.status)
self.send_header('Content-Length', length)
self.end_headers()
self.wfile.write(self.get_body())
do_POST = do_PUT = do_GET
class ProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
tunnel_chunk_size = 1024
poll_limit = 10**4
def _tunnel(self, client, remote):
client.setblocking(0)
remote.setblocking(0)
sockets = [client, remote]
noop_count = 0
while True:
readable, writeable, _ = select.select(sockets, sockets, [], 1)
if client in readable and remote in writeable:
noop_count = 0
client_bytes = client.recv(self.tunnel_chunk_size)
if not client_bytes:
break
remote.sendall(client_bytes)
if remote in readable and client in writeable:
noop_count = 0
remote_bytes = remote.recv(self.tunnel_chunk_size)
if not remote_bytes:
break
client.sendall(remote_bytes)
if noop_count > self.poll_limit:
# We have a case where all communication has
# finished but we never saw an empty read.
# This will leave both sockets as writeable
# indefinitely. We'll force a break here if
# we've crossed our polling limit.
break
noop_count += 1
def do_CONNECT(self):
if not self.validate_auth():
self.send_response(401)
self.end_headers()
return
self.send_response(200)
self.end_headers()
remote_host, remote_port = self.path.split(':')
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host, int(remote_port)))
self._tunnel(self.request, remote_socket)
remote_socket.close()
def validate_auth(self):
return True
class BackgroundTaskFailed(Exception):
pass
@contextmanager
def background(target, args=(), timeout=60):
thread = threading.Thread(target=target, args=args)
thread.daemon = True
thread.start()
try:
yield target
finally:
thread.join(timeout=timeout)
if thread.is_alive():
msg = 'Background task did not exit in a timely manner.'
raise BackgroundTaskFailed(msg)
def run_server(handler, port):
address = ('', port)
httpd = socketserver.TCPServer(address, handler, bind_and_activate=False)
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
handler.event.set()
httpd.handle_request()
httpd.server_close()
|
GUI.py
|
import wx
import wx.grid as wxgrid  # required by the Grid dialog below
import tools.Estres as Estres
import mimetypes
import os.path
import threading
from wx.lib.plot import PlotCanvas
from tools.plot import plot
import math
class Grid(wx.Dialog): # Builds the content table that lays the graphical elements out symmetrically
def __init__(self, gridSize, matrix, *args, **kwds):
# begin wxGlade: MyDialog.__init__
self.gridSize = gridSize
wx.Dialog.__init__(self, *args, **kwds)
grid = wxgrid.Grid(self, -1)
grid.CreateGrid(self.gridSize,self.gridSize)
self.Show()
class GUI(wx.Frame): # Main window class of the graphical application
def __init__(self, *args, **kwds):
# begin wxGlade: GUI.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
#Initialize the stress test before it is executed
self.test = Estres.Estres(hilos = 1 ,tiempo = None, url = None, payload = None, tipo = "GET", headers = None,auth = None, archivo = None, archivoRespuestas ="./out")
#Initialize the variables that are updated from the state of the GUI widgets
self.fileDatos = False
self.fileHeader = False
self.datos = None
self.header = None
self.timeChecked = False
self.multipartChecked = False
self.datosArchChecked = False
self.headerArchChecked = False
self.testEnProceso = False
#Initialize the GUI widgets with their corresponding classes and parameters
self.SetSize((1000, 400))
self.panel_1 = wx.Panel(self, wx.ID_ANY)
self.btnArchivoSalida = wx.Button(self.panel_1, wx.ID_ANY, "archivo", style=wx.BU_EXACTFIT)
self.cmbxTipo = wx.ComboBox(self.panel_1, wx.ID_ANY, choices=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD"], value="GET" , style=wx.CB_DROPDOWN)
self.txtUrl = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
self.spnTiempo = wx.SpinCtrl(self.panel_1, wx.ID_ANY, "0", min=0, max=360000)
self.chkBxTiempo = wx.CheckBox(self.panel_1, wx.ID_ANY, "Activar Tiempo")
self.spnHilos = wx.SpinCtrl(self.panel_1, wx.ID_ANY, "1", min=0, max=10000, style=wx.SP_ARROW_KEYS | wx.SP_WRAP)
self.txtHeader = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
self.rdboxHeader = wx.RadioBox(self.panel_1, wx.ID_ANY, "Opciones Headers", choices=["Usar Texto", "Usar Archivo"], majorDimension=2, style=wx.RA_SPECIFY_ROWS)
self.btnHeader = wx.Button(self.panel_1, wx.ID_ANY, "Archivo")
self.txtDatos = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
self.rdoBoxEntrada = wx.RadioBox(self.panel_1, wx.ID_ANY, "Opciones Entrada", choices=["Usar Texto", "Usar Archivo"], majorDimension=2, style=wx.RA_SPECIFY_ROWS)
self.btnArchivoEnviar = wx.Button(self.panel_1, wx.ID_ANY, "Archivo")
self.btnEntrada = wx.Button(self.panel_1, wx.ID_ANY, "Entrada", style=wx.BU_EXACTFIT)
self.chkbMultiPart = wx.CheckBox(self.panel_1, wx.ID_ANY, "Enviar Multipart")
self.txtAuth = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
self.btnReset = wx.Button(self.panel_1, wx.ID_ANY, "Resetear Datos")
self.btnIniciarTest = wx.Button(self.panel_1, wx.ID_ANY, "Iniciar Test")
self.__set_properties()
self.__do_layout()
#Wire up the custom events and signals
self.Bind(wx.EVT_BUTTON, self.guardarSalida, self.btnArchivoSalida)
self.Bind(wx.EVT_COMBOBOX, self.getTipo, self.cmbxTipo)
self.Bind(wx.EVT_TEXT, self.getUrl, self.txtUrl)
self.Bind(wx.EVT_SPINCTRL, self.getTiempo, self.spnTiempo)
self.Bind(wx.EVT_TEXT, self.getTiempo, self.spnTiempo)
self.Bind(wx.EVT_SPINCTRL, self.getHilos, self.spnHilos)
self.Bind(wx.EVT_TEXT, self.getHilos, self.spnHilos)
self.Bind(wx.EVT_TEXT, self.getHeader, self.txtHeader)
self.Bind(wx.EVT_RADIOBOX, self.optHeader, self.rdboxHeader)
self.Bind(wx.EVT_BUTTON, self.abrirHeader, self.btnHeader)
self.Bind(wx.EVT_RADIOBOX, self.optDatos, self.rdoBoxEntrada)
self.Bind(wx.EVT_BUTTON, self.abrirMultipart, self.btnArchivoEnviar)
self.Bind(wx.EVT_BUTTON, self.abrirDatos, self.btnEntrada)
self.Bind(wx.EVT_CHECKBOX, self.optMultipart, self.chkbMultiPart)
self.Bind(wx.EVT_TEXT, self.getAuth, self.txtAuth)
self.Bind(wx.EVT_BUTTON, self.resetForm, self.btnReset)
self.Bind(wx.EVT_BUTTON, self.iniciarTest, self.btnIniciarTest)
self.Bind(wx.EVT_CHECKBOX, self.usarTiempo, self.chkBxTiempo)
self.Bind(wx.EVT_TEXT, self.getDatos, self.txtDatos)
# end wxGlade
def __set_properties(self):
# begin wxGlade: Ventana.__set_properties
self.SetTitle("VpostHorde")
self.rdboxHeader.SetSelection(0)
self.rdoBoxEntrada.SetSelection(0)
# end wxGlade
def __do_layout(self):  # lays out the widgets inside the grid sizer
# begin wxGlade: Ventana.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.GridSizer(0, 6, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblArchivoSalida = wx.StaticText(self.panel_1, wx.ID_ANY, "Archivo Salida:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblArchivoSalida, 5, wx.ALL | wx.EXPAND | wx.FIXED_MINSIZE, 2)
grid_sizer_1.Add(self.btnArchivoSalida, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblTipo = wx.StaticText(self.panel_1, wx.ID_ANY, "Tipo:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblTipo, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.cmbxTipo, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblUrl = wx.StaticText(self.panel_1, wx.ID_ANY, "URL:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblUrl, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.txtUrl, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblTiempo = wx.StaticText(self.panel_1, wx.ID_ANY, "Tiempo:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblTiempo, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.spnTiempo, 3, wx.ALL | wx.EXPAND, 3)
grid_sizer_1.Add(self.chkBxTiempo, 0, wx.ALIGN_RIGHT | wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblHilos = wx.StaticText(self.panel_1, wx.ID_ANY, "Numero de Hilos:")
grid_sizer_1.Add(lblHilos, 0, wx.ALIGN_CENTER, 0)
grid_sizer_1.Add(self.spnHilos, 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblHeader = wx.StaticText(self.panel_1, wx.ID_ANY, "Header:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblHeader, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.txtHeader, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.rdboxHeader, 0, wx.FIXED_MINSIZE, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add(self.btnHeader, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblDatos = wx.StaticText(self.panel_1, wx.ID_ANY, "Datos:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblDatos, 0, wx.ALIGN_CENTER, 0)
grid_sizer_1.Add(self.txtDatos, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.rdoBoxEntrada, 0, 0, 0)
lblArchivoEnviar = wx.StaticText(self.panel_1, wx.ID_ANY, "Archivo a Enviar:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblArchivoEnviar, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.btnArchivoEnviar, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblArchivoEntrada = wx.StaticText(self.panel_1, wx.ID_ANY, "Seleccionar Archivo:",style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblArchivoEntrada, 0, 0, 0)
grid_sizer_1.Add(self.btnEntrada, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add(self.chkbMultiPart, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
lblAuth = wx.StaticText(self.panel_1, wx.ID_ANY, "Auth:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(lblAuth, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.txtAuth, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add(self.btnReset, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add(self.btnIniciarTest, 0, wx.EXPAND, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
grid_sizer_1.Add((0, 0), 0, 0, 0)
self.panel_1.SetSizer(grid_sizer_1)
sizer_1.Add(self.panel_1, 1, wx.EXPAND, 0)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def guardarSalida(self, event):  # Opens a dialog that stores the path of the output file
with wx.FileDialog(self, "Guardar salida", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
self.test.archivoRespuestas = "out"
return
self.test.archivoRespuestas = fileDialog.GetPath()
#These handlers run when the corresponding widget's value changes
def getTipo(self, event): # wxGlade: Ventana.<event_handler>
self.test.tipo = self.cmbxTipo.GetStringSelection()
def getUrl(self, event): # wxGlade: Ventana.<event_handler>
self.test.url = self.txtUrl.GetLineText(0)
def getTiempo(self, event): # wxGlade: Ventana.<event_handler>
if self.timeChecked == True:
self.test.tiempo = self.spnTiempo.GetValue()
def getHilos(self, event): # wxGlade: Ventana.<event_handler>
self.test.hilos = self.spnHilos.GetValue()
def getHeader(self, event): # wxGlade: Ventana.<event_handler>
if self.headerArchChecked == False:
self.test.setHeader(self.txtHeader.GetLineText(0))
else:
self.header = self.txtHeader.GetLineText(0)
def optHeader(self, event): # wxGlade: Ventana.<event_handler>
if self.rdboxHeader.GetSelection() == 1:
self.headerArchChecked = True
else:
self.headerArchChecked = False
def abrirHeader(self, event): # wxGlade: Ventana.<event_handler>
with wx.FileDialog(self, "Abrir archivo de headers",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
filename = fileDialog.GetFilename()
pathname = fileDialog.GetPath()
try:
with open(pathname, 'r') as payload:
self.test.payload = ''
for line in payload:
self.test.payload = self.test.payload.replace('\n','') + line
except IOError:
wx.LogError("no se puede abrir el archivo '%s'." % pathname)
def optDatos(self, event): # wxGlade: Ventana.<event_handler>
if self.rdoBoxEntrada.GetSelection() == 1:
self.datosArchChecked = True
else:
self.datosArchChecked = False
def abrirMultipart(self, event): # wxGlade: Ventana.<event_handler>
with wx.FileDialog(self, "Abrir archivo multipart",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
filename = fileDialog.GetFilename()
pathname = fileDialog.GetPath()
try:
self.test.file = open(pathname, 'rb')
except IOError:
wx.LogError("no se puede abrir el archivo '%s'." % pathname)
def abrirDatos(self, event): # wxGlade: Ventana.<event_handler>
with wx.FileDialog(self, "Abrir archivo de datos",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
filename = fileDialog.GetFilename()
pathname = fileDialog.GetPath()
try:
with open(pathname, 'r') as payload:
self.test.payload = ''
for line in payload:
self.test.payload = self.test.payload.replace('\n','') + line
except IOError:
wx.LogError("no se puede abrir el archivo '%s'." % pathname)
def optMultipart(self, event): # wxGlade: Ventana.<event_handler>
self.multipartChecked = self.chkbMultiPart.IsChecked()
def getAuth(self, event): # wxGlade: Ventana.<event_handler>
self.test.auth = self.txtAuth.GetLineText(0)
def resetForm(self, event): # wxGlade: Ventana.<event_handler>
self.test = Estres.Estres(hilos = 1 ,tiempo = None, url = None, payload = None, tipo = "GET", headers = None,auth = None, archivo = None, archivoRespuestas ="./out")  # re-assign so the reset actually takes effect
#This method starts the test
def iniciarTest(self, event): # wxGlade: Ventana.<event_handler>
if self.test.url == None or self.test.url == "":
with wx.MessageDialog(self, "No introduciste URL","Aviso") as dialog:
dialog.ShowModal()
return
elif self.test.headers == "no Dict":
with wx.MessageDialog(self, "No introduciste header en formato valido","Aviso") as dialog:
dialog.ShowModal()
return
else:
mutex = threading.Semaphore(0) #Semaphore handed to the worker; released when the test finishes
thread = threading.Thread(target = self.test.iniciarHilos,args=(mutex,))
thread.start()
self.espera(thread)
mutex.acquire() #Wait for the semaphore to be released when the test finishes
analisis = self.test.crearAnalisis() #Build the analysis from the results
# Draw the chart in a plot window
with wx.MessageDialog(self, str(analisis.exitosVSFallos)+" tiempo promedio: "+str(analisis.tiempo_promedio)+"\nCodigos de estado devueltos: "+str(analisis.state_codes_dict) + "\nLos resultados crudos se pueden consular en: " + self.test.archivoRespuestas+".txt" ,"Resultados") as dialog:
dialog.ShowWindowModal()
graficque = analisis.dibujar_state_codes()
grafica = plot(graficque)
grafica.dibujar()
def espera(self,hilo): #Shows a dialog reporting whether the test has finished or is still running
if hilo.is_alive() == False:
with wx.MessageDialog(self,"El test ya termino","Aviso") as finished:
val = finished.ShowWindowModal()
else:
with wx.MessageDialog(self,"El test esta en proceso","Aviso") as espera:
val = espera.ShowWindowModal()
def usarTiempo(self, event):
self.timeChecked = self.chkBxTiempo.IsChecked()
if self.timeChecked == True:
self.test.tiempo = self.spnTiempo.GetValue()
else:
self.test.tiempo = None
def getDatos(self, event):
if self.fileDatos == False:
self.test.setPayload(self.txtDatos.GetLineText(0))
else:
self.datos = self.txtDatos.GetLineText(0)
# end of class Ventana
class MyApp(wx.App): #Standard wx application bootstrap
def OnInit(self):
self.frame = GUI(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
|
__init__.py
|
# -*- encoding: utf-8 -*-
"""
_core/async_tasks/__init__.py
- provides ASYNC FUNCTIONS | DECORATORS
"""
from log_config import log, pformat
print()
log.debug(">>> _core.async_tasks.__init__.py ..." )
log.debug(">>> async ... loading async functions as global variables")
from threading import Thread
# from flask import current_app
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### ASYNC FUNCTIONS
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
# cf : https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-xi-email-support
def async_task(f):
    """
    Decorator that runs the wrapped function in a background thread.
    (Renamed from ``async``, which has been a reserved keyword since Python 3.7.)
    """
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
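# --- Hedged usage sketch (illustrative only) ---------------------------------
# Decorating a slow helper lets the caller return immediately while the work
# runs on a background thread; send_email and its arguments are hypothetical.
#
#   @async_task
#   def send_email(recipient, body):
#       ...  # long-running work happens off the caller's thread
#
#   thread = send_email("user@example.com", "hi")   # returns the started Thread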
|
skill_server.py
|
#!/usr/bin/env python
import os
import rospy
import threading
import numpy as np
import csv
from flask import Flask
from flask_ask import Ask, question, statement
from std_msgs.msg import String
from ironfish_captain.srv import captain_command
app = Flask(__name__)
ask = Ask(app, "/")
# ROS node, publisher, and parameter.
# The node is started in a separate thread to avoid conflicts with Flask.
# The parameter *disable_signals* must be set if node is not initialized
# in the main thread.
threading.Thread(target=lambda: rospy.init_node('test_node', disable_signals=True)).start()
pub = rospy.Publisher('naviIntent', String, queue_size=1)
NGROK = rospy.get_param('/ngrok', None)
@ask.launch
def launch():
'''
Executed when launching skill: say "Alexa, ask tester"
'''
welcome_sentence = 'Hello, this is ironfish, your personal robotics assistant.'
return question(welcome_sentence)
def send_command(object_,location):
try:
command = rospy.ServiceProxy('captain_service', captain_command)
resp = command(object_, location)
return resp.task_flag
except rospy.ServiceException as e:
print("Service call failed: %s" % e)
@ask.intent('MadeFromIntent')
def made_intent_function():
return statement("Hi, I am iron fish. From National Taiwan University, Department of Electrical Engineering, a.k.a. N.T.U.E.E.")
@ask.intent('NavigationIntent', default={'place':"", 'object':"", 'roomNumber':""})
def navi_intent_function(place, object, roomNumber):
'''
Executed when "TestIntent" is called:
say "Alexa, ask tester to say (first name of a person)"
Note that the 'intent_name' argument of the decorator @ask.intent
must match the name of the intent in the Alexa skill.
'''
location = "default"
object_ = "center"
# parsing location
if len(place)>0 and len(roomNumber)>0: location = place + roomNumber
elif len(place)>0 and len(roomNumber)==0: location = place
else: location = location
# parsing object
if len(object)>0:
object_list = object.split(' ')
if len(object_list)>1:
temp = ""
for i,item in enumerate(range(len(object_list)-1)):
if len(object_list[i])>len(object_list[i+1]): temp=object_list[i]
elif object_list[i] == "the": temp=object_list[i+1]
elif object_list[i+1] == "the": temp=object_list[i]
else: temp=object_list[i+1]
object_ = temp
else:
object_ = object
else: object_ = object_
print(object_)
# call service to navi and pubish topic as tracing use
output = "obj: {}; loc: {}; num: {}".format(object_,location,roomNumber)
pub.publish(output)
captain_resp = send_command(object_,location)
print "service responese:", captain_resp
if bool(captain_resp):
if object_ == "center": return statement('Ok, I am on the way to the {} of {}.'.format(object_,location))
else: return statement('Ok, I am on the way to the {} in {}.'.format(object_,location))
else:
if object_ == "center": return statement('Sorry, the place {} is not registed on the map.'.format(location))
else: return statement('Sorry, the {} in {} is not registed on the map.'.format(object_,location))
@ask.intent('AMAZON.StopIntent')
def stop():
return statement("Goodbye")
@ask.intent('AMAZON.CancelIntent')
def cancel():
return statement("Goodbye")
@ask.intent('AMAZON.NavigateHomeIntent')
def home():
return statement("Goodbye")
@ask.session_ended
def session_ended():
return "{}", 200
if __name__ == '__main__':
if NGROK:
print('NGROK mode')
app.run(host=os.environ['ROS_IP'], port=5000)
else:
print('Manual tunneling mode')
dirpath = os.path.dirname(__file__)
cert_file = os.path.join(dirpath, '../config/ssl_keys/certificate.pem')
pkey_file = os.path.join(dirpath, '../config/ssl_keys/private-key.pem')
app.run(host=os.environ['ROS_IP'], port=5000,
ssl_context=(cert_file, pkey_file))
|
test_datastore.py
|
#!/usr/bin/env python
# Datastore testing abstract class
#
# NOMURA Yoshihide <nomura@pobox.com>
import os
import sys
import unittest
from appscale.common.constants import APPSCALE_HOME
from appscale.datastore import appscale_datastore
from appscale.datastore import helper_functions as hf
from appscale.datastore.dbconstants import USERS_SCHEMA
from appscale.datastore.dbconstants import USERS_TABLE
from test import test_support
from threading import Thread
USERS_VALUES = ["suwanny@gmail.com", "11", "2009", "2009", "2009",
"bbs", "xxx", "xxx", "1", "yyy",
"0.0.0.0", "2009", "zzz", "yes"]
APPS_VALUES = ["name", "python", "version","owner","admins_list","host",
"port","creation_date", "last_time_updated_date", "yaml_file", "cksum",
"num_entries", "xxxx", "yes", "class", "index"]
ERROR_CODE = "DB_ERROR:"
datastore_name = None
def createRandomList(number_of_columns, column_name_len):
columns = []
for ii in range(0, number_of_columns):
columns += [hf.random_string(column_name_len)]
return columns
class TestDatastoreFunctions(unittest.TestCase):
def test_getput(self):
ret = self.db.put_entity(USERS_TABLE, "1", USERS_SCHEMA, USERS_VALUES)
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity(USERS_TABLE, "1", USERS_SCHEMA)
self.assertEqual(self.error_code, ret[0])
self.assertEqual(USERS_VALUES[0], ret[1])
self.assertEqual(USERS_VALUES[1], ret[2])
ret = self.db.delete_row(USERS_TABLE, "1")
self.assertEqual(self.error_code, ret[0])
def test_getput_longkey(self):
longKey = "1111111111111111111111111111111111111111111111111111111111111111111111"
ret = self.db.put_entity(USERS_TABLE, longKey, USERS_SCHEMA, USERS_VALUES)
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity(USERS_TABLE, longKey, USERS_SCHEMA)
self.assertEqual(self.error_code, ret[0])
self.assertEqual(USERS_VALUES[0], ret[1])
self.assertEqual(USERS_VALUES[1], ret[2])
ret = self.db.delete_row(USERS_TABLE, longKey)
self.assertEqual(self.error_code, ret[0])
def test_getput_longkey2(self):
longKey = "1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"
ret = self.db.put_entity(USERS_TABLE, longKey, USERS_SCHEMA, USERS_VALUES)
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity(USERS_TABLE, longKey, USERS_SCHEMA)
self.assertEqual(self.error_code, ret[0])
self.assertEqual(USERS_VALUES[0], ret[1])
self.assertEqual(USERS_VALUES[1], ret[2])
ret = self.db.delete_row(USERS_TABLE, longKey)
self.assertEqual(self.error_code, ret[0])
def test_get_each_column(self):
table = "eachcolumntest"
key = "eachcolumntest"
columns = ["1", "2", "3", "4", "5"]
values = ["1", "2", "3", "4", "5"]
ret = self.db.put_entity(table, key, columns, values)
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity(table, key, columns)
self.assertEqual(self.error_code, ret[0])
self.assertEqual("1", ret[1])
self.assertEqual("5", ret[5])
ret = self.db.get_entity(table, key, ["3"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual("3", ret[1])
ret = self.db.get_entity(table, key, ["5"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual("5", ret[1])
ret = self.db.get_entity(table, key, ["1", "2"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual("1", ret[1])
self.assertEqual("2", ret[2])
ret = self.db.get_entity(table, key, ["4", "5"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual("4", ret[1])
self.assertEqual("5", ret[2])
ret = self.db.delete_row(table, key)
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
def notest_delete_row_nonexisting_table(self):
ret = self.db.delete_row("dummytable", "dummyappname")
self.assertNotEqual(self.error_code, ret[0])
def notest_delete_table_nonexisting_table(self):
ret = self.db.delete_table("dummytable")
self.assertNotEqual(self.error_code, ret[0])
def notest_delete_row_twice(self):
table = "deletetest"
key = "deletetest"
ret = self.db.put_entity(table, key, ["value"], ["value"])
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_row(table, key)
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_row(table, key)
self.assertNotEqual(self.error_code, ret[0])
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
def test_update_column(self):
table = "updatecolumntest"
key = "updatecolumntest"
columns = ["1", "2", "3", "4", "5"]
values = ["1", "2", "3", "4", "5"]
ret = self.db.put_entity(table, key, columns, values)
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity(table, key, columns)
self.assertEqual(self.error_code, ret[0])
self.assertEqual("1", ret[1])
ret = self.db.put_entity(table, key, ["1"], ["one"])
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity(table, key, columns)
self.assertEqual(self.error_code, ret[0])
self.assertEqual("one", ret[1])
ret = self.db.delete_row(table, key)
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
def test_getschema(self):
user_schema = self.db.get_schema(USERS_TABLE)
self.assertEqual(self.error_code, user_schema[0])
self.assertEqual(1, user_schema.count("email"))
self.assertEqual(1, user_schema.count("pw"))
self.assertEqual(1, user_schema.count("enabled"))
def test_getschema_nonexisting_table(self):
ret = self.db.get_schema("dummytable")
self.assertNotEqual(self.error_code, ret[0])
def test_getput_binary(self):
f = open('%s/AppDB/test/guestbook.tar.gz' % APPSCALE_HOME, 'rb') # binary mode so the archive bytes round-trip unchanged
data = f.read()
ret = self.db.put_entity("binarytest", "binarytest", ["Encoded_Entity"], [data])
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity("binarytest", "binarytest", ["Encoded_Entity"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual(data, ret[1])
ret = self.db.delete_row("binarytest", "binarytest")
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table("binarytest")
self.assertEqual(self.error_code, ret[0])
def test_getput_bigbinary(self):
f = open('%s/AppDB/test/bigbinary' % APPSCALE_HOME, 'rb') # binary mode so the test data round-trips unchanged
data = f.read()
ret = self.db.put_entity("binarytest", "binarytest", ["Encoded_Entity"], [data])
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity("binarytest", "binarytest", ["Encoded_Entity"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual(data, ret[1])
ret = self.db.delete_row("binarytest", "binarytest")
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table("binarytest")
self.assertEqual(self.error_code, ret[0])
def test_gettable(self):
table = "dummytable"
schema = ["value1", "value2"]
ret = self.db.put_entity(table, "1", schema, ["1", "2"])
self.assertEqual(self.error_code, ret[0])
ret = self.db.put_entity(table, "2", schema, ["3", "4"])
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_table(table, schema)
self.assertEqual(self.error_code, ret[0])
self.assertEqual("1", ret[1])
self.assertEqual("2", ret[2])
self.assertEqual("3", ret[3])
self.assertEqual("4", ret[4])
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
def test_delete_table(self):
key = "11111111111111111111111111111"
ret = self.db.put_entity("deletetest", key, ["value"], ["value"])
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity("deletetest", key, ["value"])
self.assertEqual(self.error_code, ret[0])
self.assertEqual("value", ret[1])
ret = self.db.delete_table("deletetest")
self.assertEqual(self.error_code, ret[0])
ret = self.db.get_entity("deletetest", key, ["value"])
self.assertNotEqual(self.error_code, ret[0])
def notest_delete_table_twice(self):
key = "11111111111111111111111111111"
ret = self.db.put_entity("deletetest", key, ["value"], ["value"])
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table("deletetest")
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table("deletetest")
self.assertNotEqual(self.error_code, ret[0])
def test_200requests(self):
table = "testmulti"
prekey = "value"
valuename = "value"
value = "intervaltestvalue"
for i in range(200):
key = prekey + str(i)
ret = self.db.put_entity(table, key, [valuename], [value])
self.assertEqual(self.error_code, ret[0])
for i in range(200):
key = prekey + str(i)
ret = self.db.get_entity(table, key, [valuename])
if len(ret) < 2:
ret[1] = ""
self.assertEqual(value, ret[1])
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_row(table, key)
self.assertEqual(self.error_code, ret[0])
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
# 10 threads, each doing 50 put/get/delete requests
def test_10multi_requests(self):
table = "testmulti"
valuename = "value"
value = "intervaltestvalue"
# create table before testing
ret = self.db.put_entity(table, "dummykey", [valuename], [value])
self.assertEqual(self.error_code, ret[0])
tlist = []
for tnum in range(10):
key = "key" + str(tnum) + "num"
thread = Thread(target = self._multi50requests, args = [key])
thread.start()
tlist.append(thread)
for t in tlist:
t.join()
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
def test_20multi_requests(self):
table = "testmulti"
valuename = "value"
value = "intervaltestvalue"
# create table before testing
ret = self.db.put_entity(table, "dummykey", [valuename], [value])
self.assertEqual(self.error_code, ret[0])
tlist = []
for tnum in range(20):
key = "key" + str(tnum) + "num"
thread = Thread(target = self._multi50requests, args = [key])
thread.start()
tlist.append(thread)
for t in tlist:
t.join()
ret = self.db.delete_table(table)
self.assertEqual(self.error_code, ret[0])
def _multi50requests(self, prekey):
table = "testmulti"
valuename = "value"
value = "intervaltestvalue"
for i in range(50):
key = prekey + str(i)
ret = self.db.put_entity(table, key, [valuename], [value])
self.assertEqual(self.error_code, ret[0])
for i in range(50):
key = prekey + str(i)
ret = self.db.get_entity(table, key, [valuename])
if len(ret) < 2:
ret[1] = ""
self.assertEqual(value, ret[1])
self.assertEqual(self.error_code, ret[0])
for i in range(50):
key = prekey + str(i)
ret = self.db.delete_row(table, key)
self.assertEqual(self.error_code, ret[0])
def setUp(self):
global datastore_name
self.db = appscale_datastore.DatastoreFactory.getDatastore(datastore_name)
self.error_code = ERROR_CODE
def test_main():
# prepare bigbinary
filename = "%s/AppDB/test/bigbinary" % APPSCALE_HOME
os.system("dd if=/dev/zero of=%s bs=1M count=20" % filename)
# do test
global datastore_name
datastore_name = sys.argv[1]
test_support.run_unittest(TestDatastoreFunctions)
if __name__ == "__main__":
test_main()
|
Meridian_console.py
|
# #!/usr/bin/python3
# coding: UTF-8
# (or #!/usr/bin/env python etc., to match your environment)
# Izumi Ninagawa & Meridian project
# 2022.02.05 UDP communication is stable.
# 2022.02.05 Servo power can be switched on with the POWER checkbox in the COMMAND window.
# 2022.02.05 While servo power is on, the sliders move the servos (the sliders are small, so they are only useful for rough motion checks).
# 2022.04.05 Tidied up the code a little.
# 2022.04.14 Added detection and display of errors on each communication path.
# 2022.04.18 Moved the dearpygui processing to the main thread for Intel Macs. It should not run on M1 Macs because dearpygui does not support them yet.
# 2022.04.25 Added power-on and send/receive path switches to the Command pane. (ROS1 reception is untested as a feature.)
# 2022.04.25 The "Virtual" button is also unimplemented (intended for talking to a virtualized environment that emulates hardware sensor signals and the like).
# 2022.05.01 Tweaked the DEMO so that the signals of all servos change. The whole body performs a small dance.
# User guide
# - How to start
# In the directory containing this file, run
# python3 Meridian_console.py
# from a terminal. Install any missing libraries with pip3 as needed.
# UDP_RESV_IP and UDP_SEND_IP must be looked up beforehand and edited in this script.
# UDP_RESV_IP can be found in a terminal with "ip a", ipconfig, ifconfig, etc. (an optional Python snippet for this is sketched right after these notes).
# UDP_SEND_IP is shown on the PC serial monitor when the ESP32 boots.
# - About the windows
# Command window
# POWER: toggles power for all servos
# Action: sends a sine-curve head-swing motion
# ->ROS1: publishes ROS1 joint data (can be linked with RViz)
# <-ROS1: ROS1 subscription, not implemented yet.
# Control Pad Monitor: shows the gamepad input state in a normalized form.
# Message window
# Shows the IP addresses, the error count and error rate of each path, the frame count, and the operating frequency.
# ResetCounter: button that resets the counters.
# TsySKIP, PcSKIP: number of dropped frames in the sequence-number check (currently a little high; it drops to zero when the frequency is lowered to 50 Hz).
# Sensor Monitor: shows the IMU data. rol, pit and yaw are sensor-fusion values. The SetYaw button recenters the yaw axis.
# Axis Monitor: values of each servo. While power is on, the sliders can move the servos.
# A Teensy receive-skip rate of around 5% at 100 Hz operation is normal behaviour with the current specification.
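# A minimal, hedged sketch (not part of the original tool) for looking up the
# address to put into UDP_RESV_IP from Python instead of "ip a"/ipconfig. It
# assumes the PC has a default route; 8.8.8.8 is only an example target and no
# packet is actually sent by a UDP connect().
# import socket
# probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# probe.connect(("8.8.8.8", 80))
# print(probe.getsockname()[0])  # likely candidate for UDP_RESV_IP
# probe.close()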
from ast import Pass
import numpy as np
import socket
from contextlib import closing
import struct
import math
import dearpygui.dearpygui as dpg
import threading
import signal
import time
import atexit
# If ROS is not installed on this machine, comment out the following two lines.
import rospy
from sensor_msgs.msg import JointState
# Constants
TITLE_VERSION="Meridian Console v22.0501" # DPG window title / version string
UDP_RESV_IP="192.168.1.xx" # IP address of this PC
UDP_RESV_PORT=22222 # receive port
UDP_SEND_IP="192.168.1.xx" # IP address of the destination ESP32
UDP_SEND_PORT=22224 # send port
MSG_SIZE = 90 # length of the Meridim array (default is 90)
MSG_BUFF = MSG_SIZE * 2 # length of the Meridim array in bytes
STEP = 90 # per-frame step for the motion calculation: how many slices one sine-curve cycle is divided into; smaller values make the demo motion faster
# Constants for master commands (stored in element 0 of the Meridim array)
CMD_SET_YAW_CENTER = 1002 # command to recenter the IMU yaw axis
# Flags for control commands
flag_update_yaw_center = 0 # IMU yaw-recenter flag (internal to this script)
flag_servo_power = 0 # power on/off flag for all servos
flag_resv_data = 0 # on/off flag for receiving state data from the ESP32 (for simulation use while sending motions)
flag_send_data = 0 # on/off flag for sending state data to the ESP32 (so data can be sent even with servo power off)
flag_send_virtual = 0 # on/off flag for virtual hardware when running without real hardware attached
flag_send_motion = 0 # on/off flag for sending computed motions
flag_demo_action = 0 # on/off flag for sending the demo/test computed motion
flag_ros1_pub = 0 # publish ROS1 joint_states
flag_ros1_sub = 0 # subscribe to ROS1 joint_states
flag_ros1 = 0 # ROS1 init flag (first time only)
# UDP socket setup
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sock.bind((UDP_RESV_IP,UDP_RESV_PORT))
# Variables for error statistics
loop_count = 0 # frame counter
error_count_esp_to_pc = 0 # number of UDP errors from ESP32 to PC
error_count_pc_to_esp = 0 # number of UDP errors from PC to ESP32
error_count_esp_to_tsy = 0 # number of SPI errors from ESP32 to Teensy
error_count_tsy_to_esp = 0 # number of SPI errors from Teensy to ESP32
error_count_tsy_skip = 0 # count of frames where the Teensy saw a skipped clock count
error_count_esp_skip = 0 # count of frames where the ESP32 saw a skipped clock count
error_count_pc_skip = 0 # count of frames where the PC saw a skipped clock count
frame_sync_s = 0 # frame_sync count to send (0-199)
frame_sync_r_expect = 0 # incremented every frame and compared with the received count (0-199)
frame_sync_r_resv = 0 # frame_sync count received this frame
start = 0
# Meridim array related
#r_meridim_disp=list(range(MSG_SIZE)) # for displaying received Meridim values as short
r_meridim_char=list(range(MSG_SIZE*2)) # received Meridim values viewed as char (byte-wise)
r_meridim=[0]*MSG_SIZE # Meridim array for received values
s_meridim=[0]*MSG_SIZE # Meridim array for values to send
s_meridim_js_sub=[0]*MSG_SIZE # Meridim array holding servo positions subscribed from ROS
s_meridim_motion=[0]*MSG_SIZE # Meridim array for servo position commands generated on the PC side
s_meridim_motion_keep=[0]*MSG_SIZE # Meridim array keeping the positions captured at power-on
# Message display
message0 = "This PC's IP address is "+UDP_RESV_IP
message1 = ""
message2 = ""
message3 = ""
message4 = ""
# Variables for motion calculation
x = 0 # counter for incremental calculation (advanced by pi/STEP per frame)
y = 0 # counter for incremental calculation (by 1)
jspn = list(range(30)) # per-joint sign correction used when converting servo angles to ROS joint_states
jspn[0] = 1 # head yaw
jspn[1] = 1 # left shoulder pitch
jspn[2] = 1 # left shoulder roll
jspn[3] = 1 # left elbow yaw
jspn[4] = 1 # left elbow pitch
jspn[5] = 1 # left hip yaw
jspn[6] = 1 # left hip roll
jspn[7] = 1 # left hip pitch
jspn[8] = 1 # left knee pitch
jspn[9] = 1 # left ankle pitch
jspn[10] = 1 # left ankle roll
jspn[11] = 1 # spare
jspn[12] = 1 # spare
jspn[13] = 1 # spare
jspn[14] = 1 # spare
jspn[15] = 1 # waist yaw
jspn[16] = 1 # right shoulder pitch
jspn[17] = -1 # right shoulder roll
jspn[18] = -1 # right elbow yaw
jspn[19] = 1 # right elbow pitch
jspn[20] = -1 # right hip yaw
jspn[21] = -1 # right hip roll
jspn[22] = 1 # right hip pitch
jspn[23] = 1 # right knee pitch
jspn[24] = 1 # right ankle pitch
jspn[25] = -1 # right ankle roll
jspn[26] = 1 # spare
jspn[27] = 1 # spare
jspn[28] = 1 # spare
jspn[29] = 1 # spare
#def udpresv():
# pass
##################################################################################################################################################
# D a t a   s e n d   a n d   r e c e i v e #####################################################################################################
##################################################################################################################################################
def meridian_loop():
global message0
global message1
global message2
global message3
global message4
global x
global y
while (True):
print("checksss")
message1 = "Waiting for UDP data from "+UDP_SEND_IP+"..."
with closing(sock):
while True:
global loop_count
global r_meridim
global r_meridim_char
global s_meridim_motion
global error_count_pc_to_esp
global error_count_esp_to_tsy
global error_count_tsy_to_esp
global error_count_esp_to_pc
global error_count_esp_skip
global error_count_tsy_skip
global error_count_pc_skip
global frame_sync_s
global frame_sync_r_expect
global frame_sync_r_resv
global flag_servo_power
global flag_demo_action
global flag_send_data
global flag_resv_data
global flag_ros1_pub
global flag_ros1_sub
loop_count += 1 # count up the number of frames since this script started
r_bin_data,addr = sock.recvfrom(1472) # keep a copy of the data received over UDP
r_meridim=struct.unpack('90h',r_bin_data) # build the short-type Meridim90
r_meridim_char=struct.unpack('180b',r_bin_data) # build a char-type Meridim (180) for byte-wise reading
message1 = "UDP data receiving from "+UDP_SEND_IP # message shown while receiving
# Verify the checksum of the received data (sum of the first MSG_SIZE-1 shorts, bit-inverted; a standalone sketch of this rule appears right after meridian_loop)
checksum = np.array([0], dtype=np.int16)
for i in range(MSG_SIZE-1):
checksum[0] += r_meridim[i]
checksum[0] = ~checksum[0]
# Count up the various error flags
temp = np.array([0], dtype=np.int16) # numpy temporary so the bits behave like a 16-bit short in Python
if checksum[0] == r_meridim[MSG_SIZE-1]:
if (r_meridim[88] >> 14 & 1) == 1: # check error-flag bit 14 (ESP32 failed to receive UDP from the PC)
error_count_pc_to_esp += 1
if (r_meridim[88] >> 13 & 1) == 1: # check error-flag bit 13 (Teensy failed to receive SPI from the ESP32)
error_count_esp_to_tsy += 1
if (r_meridim[88] >> 12 & 1) == 1: # check error-flag bit 12 (ESP32 failed to receive SPI from the Teensy)
error_count_tsy_to_esp += 1
if (r_meridim[88] >> 10 & 1) == 1: # check error-flag bit 10 (ESP32 detected a skipped frame number in UDP from the PC)
error_count_esp_skip += 1
if (r_meridim[88] >> 9 & 1) == 1: # check error-flag bit 9 (Teensy detected a skipped frame number from the PC via the ESP32)
error_count_tsy_skip += 1
temp[0] = r_meridim[88] & 0b0111111111111111 # clear error-flag bit 15 (the PC's own UDP receive error flag)
else:
temp[0] = r_meridim[88] | 0b1000000000000000 # set error-flag bit 15 (the PC's own UDP receive error flag)
error_count_esp_to_pc += 1 # count up the PC's UDP receive errors
# Advance the expected receive counter
frame_sync_r_expect += 1
if frame_sync_r_expect > 199:
frame_sync_r_expect = 0
# Read and process the counter used for the frame-skip check
frame_sync_r_resv = r_meridim[88] & 0b0000000011111111 # extract the received count (low 8 bits)
if(frame_sync_r_resv == frame_sync_r_expect): # if the received count matches the expectation, no frame was skipped
temp[0] &= 0b1111111011111111 # clear the PC-side "skipped frame from Teensy via ESP32" flag
else:
#print("Found data unsync on PC.")
temp[0] |= 0b0000000100000000 # set the PC-side "skipped frame from Teensy via ESP32" flag
frame_sync_r_expect = frame_sync_r_resv # resync to the received count when a mismatch is detected
error_count_pc_skip += 1 # add to the skip count
# Prepare the clock for sending
frame_sync_s += 1 # count up the frame_sync_s value to send
if frame_sync_s > 199:
frame_sync_s = 0
temp[0] &= 0b1111111100000000 # clear the low 8 bits
temp[0] += frame_sync_s # store the frame_sync_s count in the low 8 bits
# Keep the latest servo positions for the PC-side servo commands
if flag_servo_power == 2: # only on the first frame after the servo-on button is pressed, keep the last received servo positions
for i in range(21,81,2):
s_meridim_motion[i] = r_meridim[i]
s_meridim_motion_keep[i] = r_meridim[i]
flag_servo_power = 1
# Build the motion to send (1: received values as-is, 2: ROS subscription applied, 3: computed motion)
if checksum[0] == r_meridim[MSG_SIZE-1]: # update the data only when reception succeeded
s_meridim=[] # clear the data
s_meridim=list(r_meridim)
# 1) Received values as-is: base the outgoing data on a copy of the received data
if flag_servo_power: # while servo power is on, keep streaming the values captured at power-on (prevents feedback-like amplification of servo position drift)
for i in range(21,81,2):
s_meridim[i] = s_meridim_motion_keep[i]
else:
if flag_resv_data:
for i in range(21,81,2): # keep the received servo values as the base of the motion to write
s_meridim_motion[i] = r_meridim[i]
# 2) If servo positions should reflect the ROS subscription, build the data here
if flag_ros1_sub:
for i in range(11):
s_meridim_motion[21+i*2] = s_meridim_js_sub[21+i*2]
s_meridim_motion[51+i*2] = s_meridim_js_sub[51+i*2]
# 3) If servo positions are computed here, build the data below (for now only the demo motion is used as an operational test)
if flag_demo_action:
# advance x every frame
x += math.pi/STEP
if x>math.pi*2000: # reset after 1000 full cycles
x = 0
# output a sine-curve demo motion to all of the servos
s_meridim_motion[51] = int(np.sin(x)*3000) # head yaw
s_meridim_motion[23] = int(np.sin(x)*1000) +2000 # left shoulder pitch
s_meridim_motion[25] = -int(np.sin(x*2)*1000) +1000 # left shoulder roll
s_meridim_motion[27] = int(np.sin(x)*1000) +1000 # left elbow yaw
s_meridim_motion[29] = int(np.sin(x)*3000) -3000 # left elbow pitch
s_meridim_motion[31] = int(np.sin(x)*500) # left hip yaw
s_meridim_motion[33] = -int(np.sin(x)*400) # left hip roll
s_meridim_motion[35] = int(np.sin(x*2)*2000) -300 # left hip pitch
s_meridim_motion[37] = -int(np.sin(x*2)*4000) # left knee pitch
s_meridim_motion[39] = int(np.sin(x*2)*2000) # left ankle pitch
s_meridim_motion[41] = int(np.sin(x)*400) # left ankle roll
s_meridim_motion[21] = -int(np.sin(x)*2000) # waist yaw
s_meridim_motion[53] = -int(np.sin(x)*1000) +2000 # right shoulder pitch
s_meridim_motion[55] = -int(np.sin(x*2)*1000) +1000 # right shoulder roll
s_meridim_motion[57] = -int(np.sin(x)*1000) +1000 # right elbow yaw
s_meridim_motion[59] = -int(np.sin(x)*3000) -3000 # right elbow pitch
s_meridim_motion[61] = -int(np.sin(x)*500) # right hip yaw
s_meridim_motion[63] = int(np.sin(x)*400) # right hip roll
s_meridim_motion[65] = -int(np.sin(x*2)*2000) -300 # right hip pitch
s_meridim_motion[67] = int(np.sin(x*2)*4000) # right knee pitch
s_meridim_motion[69] = -int(np.sin(x*2)*2000) # right ankle pitch
s_meridim_motion[71] = -int(np.sin(x)*400) # right ankle roll
# Store the data into the outgoing Meridim array
# Servo on/off flag check: store the servo-on flags
if flag_servo_power > 0:
for i in range(20,80,2):
s_meridim[i] = 1
else:
for i in range(20,80,2):
s_meridim[i] = 0
# Store the servo positions issued by the PC
if flag_send_data:
for i in range(21,81,2):
s_meridim[i] = s_meridim_motion[i]
# Master command flag check: store the yaw-recenter command
global flag_update_yaw_center
if (flag_update_yaw_center > 0):
flag_update_yaw_center -= 1
s_meridim[0] = CMD_SET_YAW_CENTER
if (flag_update_yaw_center==0):
print("Send COMMAND 'Set Yaw Center.':["+str(CMD_SET_YAW_CENTER)+"]")
# Store the kept error flags / send clock
s_meridim[88] = temp[0]
# Append the checksum for the outgoing data
checksum[0] = 0
checksum_int = 0
for i in range(MSG_SIZE-1):
checksum_int += s_meridim[i]
checksum[0] = ~checksum_int
s_meridim[MSG_SIZE-1]=checksum[0]
time.sleep(2/1000) # brief pause
# Pack the data and send it over UDP
s_bin_data=struct.pack('90h',*s_meridim)
sock.sendto(s_bin_data,(UDP_SEND_IP,UDP_SEND_PORT))
#print("Frame "+str(int(frame_sync_r_resv - frame_sync_r_resv_past)))
now = time.time()-start
message2="ERROR COUNT ESP-PC:"+str("{:}".format(error_count_esp_to_pc))+\
" PC-ESP:"+str("{:}".format(error_count_pc_to_esp))+" ESP-TSY:"+str("{:}".format(error_count_esp_to_tsy))
message3="ERROR RATE ESP-PC:"+str("{:.2%}".format(error_count_esp_to_pc/loop_count))+\
" PC-ESP:"+str("{:.2%}".format(error_count_pc_to_esp/loop_count))+" ESP-TSY:"+str("{:.2%}".format(error_count_esp_to_tsy/loop_count))
message4="SKIP COUNT TsySKIP:"+\
str("{:}".format(error_count_tsy_skip))+" ESPSKIP:"+str("{:}".format(error_count_esp_skip))+" PcSKIP:"+str("{:}".format(error_count_pc_skip))+\
" Frames:"+str(loop_count)+" "+str(int(loop_count/now))+"Hz"
##################################################################################################################################################
# M i s c e l l a n e o u s   f u n c t i o n s #################################################################################################
##################################################################################################################################################
def cleanup(): # attempt to make sure the socket is closed even on Ctrl+C exit (not functioning so far)
print("Meridian_console quit.")
atexit.register(cleanup) # it is unclear whether this registration actually takes effect
def set_servo_power(): # toggle the servo power flag according to the checkbox
global flag_servo_power
if flag_servo_power == 0 :
flag_servo_power = 2
print("Servo Power ON")
else:
flag_servo_power = 0
print("Servo Power OFF")
def set_demo_action(): # toggle the demo-action send flag according to the checkbox
global flag_demo_action
if flag_demo_action == 0 :
flag_demo_action = 1
print("Start DEMO motion data streaming.")
else:
flag_demo_action = 0
print("Quit DEMO motion data streaming.")
def set_resv_data(): # toggle the data-receive flag according to the checkbox
global flag_resv_data
if flag_resv_data == 0 :
flag_resv_data = 1
print("Start receiving data from ESP32.")
else:
flag_resv_data = 0
print("Quit receiving data from ESP32.")
def set_send_data(): # toggle the data-send flag according to the checkbox
global flag_send_data
if flag_send_data == 0 :
flag_send_data = 1
print("Start sending data to ESP32.")
else:
flag_send_data = 0
print("Quit sending data to ESP32.")
def set_send_virtual(): # toggle the virtual-hardware send flag according to the checkbox
global flag_send_virtual
if flag_send_virtual == 0 :
flag_send_virtual = 1
print("Start nothing. Virtual hardware is unimplemented.")
else:
flag_send_virtual = 0
print("Quit nothing. Virtual hardware is unimplemented.")
def ros1_pub(): # toggle the ROS1 publish flag according to the checkbox
#print("ROS1 is not available.")
global flag_ros1_pub
if flag_ros1_pub == 0 :
flag_ros1_pub = 1
print("Start publishing ROS1 joint_states.")
else:
flag_ros1_pub = 0
print("Quit publishing ROS1 joint_states.")
def ros1_sub(): # toggle the ROS1 subscribe flag according to the checkbox
global flag_ros1_sub
if flag_ros1_sub == 0 :
flag_ros1_sub = 1
print("Start subscribing ROS1 joint_states.")
else:
flag_ros1_sub = 0
print("Quit subscribing ROS1 joint_states.")
def set_servo_angle(channel, app_data): # slider callback: map "ID Lxx"/"ID Rxx" tags to Meridim indices
global s_meridim_motion
if channel[3]=="L":
idx = int(channel[4:6])*2+21
s_meridim_motion[idx] = int(app_data*100)
print(f"L meri: {idx}")
if channel[3]=="R":
idx = int(channel[4:6])*2+51
s_meridim_motion[idx] = int(app_data*100)
print(f"R meri: {idx}")
print(f"channel is: {channel[3]}")
print(f"channel is: {channel[4:6]}")
print(f"app_data is: {int(app_data*100)}")
print(f"motion is: {s_meridim_motion[idx]}")
def callback(JointState):
global s_meridim_js_sub
global jspn
for i in range(11):
s_meridim_js_sub[21+i*2]=round(JointState.position[i]*10000)*jspn[i]
s_meridim_js_sub[51+i*2]=round(JointState.position[11+i]*10000)*jspn[15+i]
##################################################################################################################################################
# C o n s o l e   r e n d e r i n g   w i t h   d e a r p y g u i ##############################################################################
##################################################################################################################################################
def main():
global r_meridim
global flag_ros1
global jspn
# dpg helper functions ==================================================
def set_yaw_center(): # raise the IMU yaw-recenter flag (the command is then sent for 20 consecutive frames)
global flag_update_yaw_center
flag_update_yaw_center = 20
def reset_counter(): # reset the counters
global loop_count
global error_count_pc_to_esp
global error_count_esp_to_tsy
global error_count_tsy_to_esp
global error_count_esp_to_pc
global error_count_tsy_skip
global error_count_esp_skip
global error_count_pc_skip
global start
loop_count = 1
error_count_pc_to_esp = 0
error_count_esp_to_tsy = 0
error_count_tsy_to_esp = 0
error_count_esp_to_pc = 0
error_count_tsy_skip = 0
error_count_esp_skip = 0
error_count_pc_skip = 0
start = time.time()
while(True):
# dpg rendering ==================================================
dpg.create_context()
dpg.create_viewport(title=TITLE_VERSION, width=600, height=520)
# (top left) window for monitoring servo positions ==================================================
with dpg.window(label="Axis Monitor", width=250, height=350,pos=[5,5]):
with dpg.group(label='LeftSide'):
for i in range(0, 15, 1):
dpg.add_slider_float(default_value=0, tag="ID L"+str(i),label="L"+str(i),max_value=100,min_value=-100,callback=set_servo_angle,pos=[10,35+i*20], width=80)
with dpg.group(label='RightSide'):
for i in range(0, 15, 1):
dpg.add_slider_float(default_value=0, tag="ID R"+str(i),label="R"+str(i),max_value=100,min_value=-100,callback=set_servo_angle,pos=[135,35+i*20], width=80)
# (bottom) window for messages (addresses, communication errors, etc.) ==================================================
with dpg.window(label="Messege", width=590, height=155,pos=[5,360]):
dpg.add_button(label="ResetCounter", callback=reset_counter, width =90, pos=[470,30])
dpg.add_text(message0,tag="DispMessage0")
dpg.add_text(message1,tag="DispMessage1")
dpg.add_text(message2,tag="DispMessage2")
dpg.add_text(message3,tag="DispMessage3")
dpg.add_text(message4,tag="DispMessage4")
# (right side) window for monitoring sensor values ==================================================
with dpg.window(label="Sensor Monitor", width=335, height=175,pos=[260,5]):
with dpg.group(label='LeftSide'):
dpg.add_slider_float(default_value=0, tag="mpu0", label="ac_x",max_value=327,min_value=-327,pos=[10,35], width=60)
dpg.add_slider_float(default_value=0, tag="mpu1", label="ac_y",max_value=327,min_value=-327,pos=[115,35], width=60)
dpg.add_slider_float(default_value=0, tag="mpu2", label="ac_z",max_value=327,min_value=-327,pos=[220,35], width=60)
dpg.add_slider_float(default_value=0, tag="mpu3", label="gr_x",max_value=327,min_value=-327,pos=[10,55], width=60)
dpg.add_slider_float(default_value=0, tag="mpu4", label="gr_y",max_value=327,min_value=-327,pos=[115,55], width=60)
dpg.add_slider_float(default_value=0, tag="mpu5", label="gr_z",max_value=327,min_value=-327,pos=[220,55], width=60)
dpg.add_slider_float(default_value=0, tag="mpu6", label="mg_x",max_value=327,min_value=-327,pos=[10,75], width=60)
dpg.add_slider_float(default_value=0, tag="mpu7", label="mg_y",max_value=327,min_value=-327,pos=[115,75], width=60)
dpg.add_slider_float(default_value=0, tag="mpu8", label="mg_z",max_value=327,min_value=-327,pos=[220,75], width=60)
dpg.add_slider_float(default_value=0, tag="mpu9", label="temp",max_value=327,min_value=-327,pos=[10,95], width=60)
dpg.add_slider_float(default_value=0, tag="mpu10", label="rol",max_value=327,min_value=-327,pos=[10,120], width=60)
dpg.add_slider_float(default_value=0, tag="mpu11", label="pit",max_value=327,min_value=-327,pos=[115,120], width=60)
dpg.add_slider_float(default_value=0, tag="mpu12", label="yaw",max_value=327,min_value=-327,pos=[220,120], width=60)
dpg.add_button(label="SetYaw", callback=set_yaw_center, width =50, pos=[270,148])
# (middle right) window for sending commands / showing gamepad values ==================================================
with dpg.window(label="Command", width=335, height=170,pos=[260,185]):
dpg.add_checkbox(label="Power", tag="Power", callback=set_servo_power, pos=[8,50])
dpg.add_checkbox(tag="Receive", callback=set_resv_data, pos=[160,27])
dpg.add_text("ESP32->", pos=[100,27])
dpg.add_checkbox(tag="Send", callback=set_send_data, pos=[160,50])
dpg.add_text("ESP32<-", pos=[100,50])
dpg.add_checkbox(tag="Virtual", callback=set_send_virtual, pos=[160,73])
dpg.add_text("Virtual<-", pos=[86,73])
dpg.add_checkbox(label="->ROS1", tag="ROS1pub", callback=ros1_pub, pos=[192,27])
dpg.add_checkbox(label="<-ROS1", tag="ROS1sub", callback=ros1_sub, pos=[192,50])
dpg.add_checkbox(label="<-Demo", tag="Action", callback=set_demo_action, pos=[192,73])
dpg.add_text("Control Pad Monitor", pos=[10,100])
dpg.add_text("button",tag="pad_button", pos=[170,100])
dpg.add_slider_int(default_value=0, tag="pad_Lx", label="Lx",max_value=127,min_value=-127, pos=[10,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_Ly", label="Ly",max_value=127,min_value=-127, pos=[90,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_Rx", label="Rx",max_value=127,min_value=-127, pos=[170,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_Ry", label="Ry",max_value=127,min_value=-127, pos=[250,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_L2v", label="L2v",max_value=255,min_value=0, pos=[90,140], width=40)
dpg.add_slider_int(default_value=0, tag="pad_R2v", label="R2v",max_value=255,min_value=0, pos=[170,140], width=40)
# register dpg value-registry entries
with dpg.value_registry():
dpg.add_int_value(tag="button_data")
dpg.setup_dearpygui()
dpg.show_viewport()
# U p d a t e   t h e   d a t a   s h o w n   b y   d p g ==================================================
while dpg.is_dearpygui_running():
signal.signal(signal.SIGINT, signal.SIG_DFL)
global r_meridim
global s_meridim_motion
# refresh the message pane
dpg.set_value("DispMessage0", message0) # message pane display
dpg.set_value("DispMessage1", message1) # message pane display
dpg.set_value("DispMessage2", message2) # message pane display
dpg.set_value("DispMessage3", message3) # message pane display
dpg.set_value("DispMessage4", message4) # message pane display
# refresh the servo and IMU data display
for i in range(0, 15, 1):
#global button
idld = r_meridim[21+i*2]
idrd = r_meridim[51+i*2]
idsensor = r_meridim[i+2]/10000
dpg.set_value("ID L"+str(i), idld/100) #サーボIDと数値の表示L側
dpg.set_value("ID R"+str(i), idrd/100) #サーボIDと数値の表示R側
if i < 13: #IMUデータの更新
if i < 11:
dpg.set_value("mpu"+str(i),idsensor)
else:
dpg.set_value("mpu"+str(i),idsensor*100)
# refresh the gamepad data display
dpg.set_value("pad_button", str(r_meridim[80]))
dpg.set_value("pad_Lx", r_meridim_char[163])
dpg.set_value("pad_Ly", r_meridim_char[162])
dpg.set_value("pad_Rx", r_meridim_char[165])
dpg.set_value("pad_Ry", r_meridim_char[164])
padL2val = (r_meridim_char[167])
if (padL2val<0):
padL2val = 256+padL2val
if (r_meridim[80]&256==0):
padL2val = 0
padR2val = (r_meridim_char[166])
if (padR2val<0):
padR2val = 256+padR2val
if (r_meridim[80]&512==0):
padR2val = 0
dpg.set_value("pad_L2v", padL2val)
dpg.set_value("pad_R2v", padR2val)
dpg.set_value("button_data", r_meridim[80])
# Publish ROS1 joint_states =====================================================================================
if flag_ros1_pub: # ROS output: publish joint_states
if flag_ros1==0:
rospy.init_node('joint_state_meridim', anonymous=True)
flag_ros1=1
joint_pub = rospy.Publisher('joint_states', JointState, queue_size=10)
rate = rospy.Rate(100) # 100hz
js_meridim = JointState()
js_meridim.header.stamp = rospy.Time.now()
js_meridim.name =\
['c_chest_yaw', 'l_shoulder_pitch', 'l_shoulder_roll', 'l_elbow_yaw',\
'l_elbow_pitch', 'l_hipjoint_yaw', 'l_hipjoint_roll', 'l_hipjoint_pitch',\
'l_knee_pitch', 'l_ankle_pitch', 'l_ankle_roll', \
'c_head_yaw', 'r_shoulder_pitch', 'r_shoulder_roll', 'r_elbow_yaw',\
'r_elbow_pitch', 'r_hipjoint_yaw', 'r_hipjoint_roll', 'r_hipjoint_pitch',\
'r_knee_pitch', 'r_ankle_pitch', 'r_ankle_roll']
js_meridim.position = \
[math.radians(s_meridim_motion[21]/100*jspn[0]), math.radians(s_meridim_motion[23]/100*jspn[1]), math.radians(s_meridim_motion[25]/100)*jspn[2], math.radians(s_meridim_motion[27]/100*jspn[3]),\
math.radians(s_meridim_motion[29]/100*jspn[4]), math.radians(s_meridim_motion[31]/100*jspn[5]), math.radians(s_meridim_motion[33]/100*jspn[6]), math.radians(s_meridim_motion[35]/100*jspn[7]), \
math.radians(s_meridim_motion[37]/100*jspn[8]),math.radians(s_meridim_motion[39]/100*jspn[9]), math.radians(s_meridim_motion[41]/100*jspn[10]),\
math.radians(s_meridim_motion[51]/100*jspn[15]),math.radians(s_meridim_motion[53]/100*jspn[16]), math.radians(s_meridim_motion[55]/100*jspn[17]), math.radians(s_meridim_motion[57]/100*jspn[18]), \
math.radians(s_meridim_motion[59]/100*jspn[19]), math.radians(s_meridim_motion[61]/100*jspn[20]), math.radians(s_meridim_motion[63]/100*jspn[21]), math.radians(s_meridim_motion[65]/100*jspn[22]), \
math.radians(s_meridim_motion[67]/100*jspn[23]), math.radians(s_meridim_motion[69]/100*jspn[24]), math.radians(s_meridim_motion[71]/100*jspn[25])]
js_meridim.velocity = []
#js_meridim.velocity = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
js_meridim.effort = []
joint_pub.publish(js_meridim)
rate.sleep()
# S u b s c r i b e   t o   R O S 1   joint_states ====================================================================================
if flag_ros1_sub:
#joint_sub = rospy.Subscriber('joint_states', JointState, queue_size=10)
if flag_ros1==0:
rospy.init_node('joint_state_meridim', anonymous=True)
flag_ros1=1
rospy.Subscriber('joint_states', JointState, callback)
#rospy.spin()
# ====================================================================================================
# dpg display refresh
dpg.render_dearpygui_frame()
dpg.destroy_context()
# Run the UDP send/receive loop and the screen rendering in parallel on two threads
if __name__ == '__main__':
thread1 = threading.Thread(target=meridian_loop) # UDP send/receive thread; the dearpygui console rendering runs in the main thread via main()
thread1.start()
main()
|
tentacle.py
|
import time
import threading
import logging
import pyDatalog
import pickle
from tp_utils import pipe
logger = logging.getLogger(__name__)
def data_handler(entity_zoo, data):
if len(data) == 0:
return
opcode, msg = pickle.loads(data)
if opcode == 'query_clause':
with entity_zoo.lock:
try:
answer = pyDatalog.pyDatalog.ask(msg)
if answer is None:
logger.info('clause %s has no result', msg)
return
output = ""
for line in answer.answers:
output += str(line) + '\n'
return output
except Exception as err:
logger.info('failed to query_clause, the clause is %s: %s', msg, err)
elif opcode == 'query_entity':
with entity_zoo.lock:
try:
logical_entity = entity_zoo.entity_set[msg]
except Exception as err:
logger.info('failed to query_entity, the query entity is %s: %s', msg, err)
return
return str(logical_entity)
else:
logger.info('unsupported command type: %s', opcode)
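# A hedged sketch (not part of the original module) of the payload format that
# data_handler expects from the debug pipe: a pickled (opcode, message) tuple.
# The tp_utils.pipe transport is assumed to deliver the raw pickled bytes.
# import pickle
# payload = pickle.dumps(('query_clause', 'ancestor(X, Y)'))    # hypothetical clause
# payload = pickle.dumps(('query_entity', 'some_entity_name'))  # hypothetical entity key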
def monitor_debug_info(entity_zoo, extra):
pyDatalog.Logic(extra['logic'])
read_fn = pipe.create_debug_pipe(pipe.DEBUG_PIPE_PATH, data_handler)
read_fn(entity_zoo)
def start_monitor_debug_info(entity_zoo, extra):
t = threading.Thread(target = monitor_debug_info, args = (entity_zoo, extra))
t.setDaemon(True)
t.start()
return t
|
parasol.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from builtins import next
from builtins import str
from past.utils import old_div
from future.utils import listitems
import logging
import os
import re
import sys
import subprocess
import tempfile
import time
from threading import Thread
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six import itervalues
from bd2k.util.iterables import concat
from bd2k.util.processes import which
from toil.batchSystems.abstractBatchSystem import BatchSystemSupport
from toil.lib.bioio import getTempFile
from toil.common import Toil
logger = logging.getLogger(__name__)
class ParasolBatchSystem(BatchSystemSupport):
"""
The interface for Parasol.
"""
@classmethod
def supportsWorkerCleanup(cls):
return False
@classmethod
def supportsHotDeployment(cls):
return False
def __init__(self, config, maxCores, maxMemory, maxDisk):
super(ParasolBatchSystem, self).__init__(config, maxCores, maxMemory, maxDisk)
if maxMemory != sys.maxsize:
logger.warn('The Parasol batch system does not support maxMemory.')
# Keep the name of the results file for the pstat2 command..
command = config.parasolCommand
if os.path.sep not in command:
try:
command = next(which(command))
except StopIteration:
raise RuntimeError("Can't find %s on PATH." % command)
logger.info('Using Parasol at %s', command)
self.parasolCommand = command
jobStoreType, path = Toil.parseLocator(config.jobStore)
if jobStoreType != 'file':
raise RuntimeError("The parasol batch system doesn't currently work with any "
"jobStore type except file jobStores.")
self.parasolResultsDir = tempfile.mkdtemp(dir=os.path.abspath(path))
logger.debug("Using parasol results dir: %s", self.parasolResultsDir)
# In Parasol, each results file corresponds to a separate batch, and all jobs in a batch
# have the same cpu and memory requirements. The keys to this dictionary are the (cpu,
# memory) tuples for each batch. A new batch is created whenever a job has a new unique
# combination of cpu and memory requirements.
self.resultsFiles = dict()
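# For illustration only (hypothetical values): after two jobs with distinct
# requirements have been issued, the mapping might look like
# self.resultsFiles == {(2147483648, 1): '/jobstore/tmp1234/tmpAAAA',
#                       (4294967296, 2): '/jobstore/tmp1234/tmpBBBB'}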
self.maxBatches = config.parasolMaxBatches
# Allows the worker process to send back the IDs of jobs that have finished, so the batch
# system can decrease its used cpus counter
self.cpuUsageQueue = Queue()
# Also stores finished job IDs, but is read by getUpdatedJobIDs().
self.updatedJobsQueue = Queue()
# Use this to stop the worker when shutting down
self.running = True
self.worker = Thread(target=self.updatedJobWorker, args=())
self.worker.start()
self.usedCpus = 0
self.jobIDsToCpu = {}
# Set of jobs that have been issued but aren't known to have finished or been killed yet.
# Jobs that end by themselves are removed in getUpdatedJob, and jobs that are killed are
# removed in killBatchJobs.
self.runningJobs = set()
def _runParasol(self, command, autoRetry=True):
"""
Issues a parasol command using popen to capture the output. If the command fails then it
will try pinging parasol until it gets a response. When it gets a response it will
recursively call the issue parasol command, repeating this pattern for a maximum of N
times. The final exit value will reflect this.
"""
command = list(concat(self.parasolCommand, command))
while True:
logger.debug('Running %r', command)
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=-1)
stdout, stderr = process.communicate()
status = process.wait()
for line in stderr.split('\n'):
if line: logger.warn(line)
if status == 0:
return 0, stdout.split('\n')
message = 'Command %r failed with exit status %i' % (command, status)
if autoRetry:
logger.warn(message)
else:
logger.error(message)
return status, None
logger.warn('Waiting 10s before trying again')
time.sleep(10)
parasolOutputPattern = re.compile("your job ([0-9]+).*")
def issueBatchJob(self, jobNode):
"""
Issues parasol with job commands.
"""
self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk)
MiB = 1 << 20
truncatedMemory = (old_div(jobNode.memory, MiB)) * MiB
# Look for a batch for jobs with these resource requirements, with
# the memory rounded down to the nearest megabyte. Rounding down
# means the new job can't ever decrease the memory requirements
# of jobs already in the batch.
if len(self.resultsFiles) >= self.maxBatches:
raise RuntimeError( 'Number of batches reached limit of %i' % self.maxBatches)
try:
results = self.resultsFiles[(truncatedMemory, jobNode.cores)]
except KeyError:
results = getTempFile(rootDir=self.parasolResultsDir)
self.resultsFiles[(truncatedMemory, jobNode.cores)] = results
# Prefix the command with environment overrides, optionally looking them up from the
# current environment if the value is None
command = ' '.join(concat('env', self.__environment(), jobNode.command))
parasolCommand = ['-verbose',
'-ram=%i' % jobNode.memory,
'-cpu=%i' % jobNode.cores,
'-results=' + results,
'add', 'job', command]
# Deal with the cpus
self.usedCpus += jobNode.cores
while True: # Process finished results with no wait
try:
jobID = self.cpuUsageQueue.get_nowait()
except Empty:
break
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
while self.usedCpus > self.maxCores: # If we are still waiting
jobID = self.cpuUsageQueue.get()
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
# Now keep going
while True:
line = self._runParasol(parasolCommand)[1][0]
match = self.parasolOutputPattern.match(line)
if match is None:
# This is because parasol add job will return success, even if the job was not
# properly issued!
logger.info('We failed to properly add the job; we will try again after 5s.')
time.sleep(5)
else:
jobID = int(match.group(1))
self.jobIDsToCpu[jobID] = jobNode.cores
self.runningJobs.add(jobID)
logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
return jobID
def setEnv(self, name, value=None):
if value and ' ' in value:
raise ValueError('Parasol does not support spaces in environment variable values.')
return super(ParasolBatchSystem, self).setEnv(name, value)
def __environment(self):
return (k + '=' + (os.environ[k] if v is None else v) for k, v in listitems(self.environment))
def killBatchJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
if jobID in self.runningJobs:
self.runningJobs.remove(jobID)
exitValue = self._runParasol(['remove', 'job', str(jobID)],
autoRetry=False)[0]
logger.info("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedBatchJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
break
logger.warn('Tried to kill some jobs, but something happened and they are still '
'going, will try again in 5s.')
time.sleep(5)
# Update the CPU usage, because killed jobs aren't written to the results file.
for jobID in jobIDs:
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
runningPattern = re.compile(r'r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+')
def getJobIDsForResultsFile(self, resultsFile):
"""
Get all queued and running jobs for a results file.
"""
jobIDs = []
for line in self._runParasol(['-extended', 'list', 'jobs'])[1]:
fields = line.strip().split()
if len(fields) == 0 or fields[-1] != resultsFile:
continue
jobID = fields[0]
jobIDs.append(int(jobID))
return set(jobIDs)
def getIssuedBatchJobIDs(self):
"""
Gets the list of jobs issued to parasol in all results files, but not including jobs
created by other users.
"""
issuedJobs = set()
for resultsFile in itervalues(self.resultsFiles):
issuedJobs.update(self.getJobIDsForResultsFile(resultsFile))
return list(issuedJobs)
def getRunningBatchJobIDs(self):
"""
Returns map of running jobIDs and the time they have been running.
"""
# Example lines..
# r 5410186 benedictpaten worker 1247029663 localhost
# r 5410324 benedictpaten worker 1247030076 localhost
runningJobs = {}
issuedJobs = self.getIssuedBatchJobIDs()
for line in self._runParasol(['pstat2'])[1]:
if line != '':
match = self.runningPattern.match(line)
if match is not None:
jobID = int(match.group(1))
startTime = int(match.group(2))
if jobID in issuedJobs: # It's one of our jobs
runningJobs[jobID] = time.time() - startTime
return runningJobs
def getUpdatedBatchJob(self, maxWait):
while True:
try:
jobID, status, wallTime = self.updatedJobsQueue.get(timeout=maxWait)
except Empty:
return None
try:
self.runningJobs.remove(jobID)
except KeyError:
# We tried to kill this job, but it ended by itself instead, so skip it.
pass
else:
return jobID, status, wallTime
@classmethod
def getRescueBatchJobFrequency(cls):
"""
Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive.
"""
return 5400 # Once every 90 minutes
def updatedJobWorker(self):
"""
We use the parasol results to update the status of jobs, adding them
to the list of updated jobs.
Results have the following structure.. (thanks Mark D!)
int status; /* Job status - wait() return format. 0 is good. */
char *host; /* Machine job ran on. */
char *jobId; /* Job queuing system job ID */
char *exe; /* Job executable file (no path) */
int usrTicks; /* 'User' CPU time in ticks. */
int sysTicks; /* 'System' CPU time in ticks. */
unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
unsigned startTime; /* Job start time in seconds since 1/1/1970 */
unsigned endTime; /* Job end time in seconds since 1/1/1970 */
char *user; /* User who ran job */
char *errFile; /* Location of stderr file on host */
Plus you finally have the command name.
"""
resultsFiles = set()
resultsFileHandles = []
try:
while self.running:
# Look for any new results files that have been created, and open them
newResultsFiles = set(os.listdir(self.parasolResultsDir)).difference(resultsFiles)
for newFile in newResultsFiles:
newFilePath = os.path.join(self.parasolResultsDir, newFile)
resultsFileHandles.append(open(newFilePath, 'r'))
resultsFiles.add(newFile)
for fileHandle in resultsFileHandles:
while self.running:
line = fileHandle.readline()
if not line:
break
assert line[-1] == '\n'
(status, host, jobId, exe, usrTicks, sysTicks, submitTime, startTime,
endTime, user, errFile, command) = line[:-1].split(None, 11)
status = int(status)
jobId = int(jobId)
if os.WIFEXITED(status):
status = os.WEXITSTATUS(status)
else:
status = -status
self.cpuUsageQueue.put(jobId)
startTime = int(startTime)
endTime = int(endTime)
if endTime == startTime:
# Both, start and end time is an integer so to get sub-second
# accuracy we use the ticks reported by Parasol as an approximation.
# This isn't documented but what Parasol calls "ticks" is actually a
# hundredth of a second. Parasol does the unit conversion early on
# after a job finished. Search paraNode.c for ticksToHundreths. We
# also cheat a little by always reporting at least one hundredth of a
# second.
usrTicks = int(usrTicks)
sysTicks = int(sysTicks)
wallTime = float( max( 1, usrTicks + sysTicks) ) * 0.01
else:
wallTime = float(endTime - startTime)
self.updatedJobsQueue.put((jobId, status, wallTime))
time.sleep(1)
except:
logger.warn("Error occurred while parsing parasol results files.")
raise
finally:
for fileHandle in resultsFileHandles:
fileHandle.close()
def shutdown(self):
self.killBatchJobs(self.getIssuedBatchJobIDs()) # cleanup jobs
for results in itervalues(self.resultsFiles):
exitValue = self._runParasol(['-results=' + results, 'clear', 'sick'],
autoRetry=False)[0]
if exitValue is not None:
logger.warn("Could not clear sick status of the parasol batch %s" % results)
exitValue = self._runParasol(['-results=' + results, 'flushResults'],
autoRetry=False)[0]
if exitValue is not None:
logger.warn("Could not flush the parasol batch %s" % results)
self.running = False
logger.debug('Joining worker thread...')
self.worker.join()
logger.debug('... joined worker thread.')
for results in list(self.resultsFiles.values()):
os.remove(results)
os.rmdir(self.parasolResultsDir)
@classmethod
def setOptions(cls, setOption):
from toil.common import iC
setOption("parasolCommand", None, None, 'parasol')
setOption("parasolMaxBatches", int, iC(1), 10000)
|
oppomqtt.py
|
#!/usr/bin/env python
from socket import SO_REUSEADDR, SOCK_STREAM, error, socket, SOL_SOCKET, AF_INET
from threading import Thread
import paho.mqtt.client as mqtt
from oppomessages import OPPOMSG
### CONFIGURATION ####################################################################################################
OPPO_HOST = 'oppohost' # IP address or hostname of the Oppo player
OPPO_PORT = 23 # Port to connect to, normally 23
MQTT_HOST = 'mqtthost' # IP address or hostname of the MQTT server
MQTT_PORT = 1883 # Port to connect to, normally 1883
MQTT_USER = 'mqttuser' # Username for MQTT connection, set to '' for no authentication
MQTT_PASS = 'supersecretpassword' # Password for MQTT connection
MQTT_TOPIC = '/multimedia/oppo/cmd' # Topic to subscribe to for sending commands to oppo and receiving responses
MQTT_BASE = '/multimedia/oppo/' # Base topic to use for sending status update from oppo
##########################################################################################################################
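# For a quick check of the bridge (illustrative only, using the placeholder hostnames
# above): the retained status topics published under MQTT_BASE, plus the raw player
# responses on MQTT_BASE + 'raw', can be watched with mosquitto_sub, e.g.
# mosquitto_sub -h mqtthost -t '/multimedia/oppo/#' -v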
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
mqttc.subscribe(MQTT_TOPIC)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
class Client:
def __init__(self, host, port):
self.s = socket(AF_INET, SOCK_STREAM)
self.host = host
self.port = port
def sendmqtt(self, code, msg):
mqttc.publish(MQTT_BASE + code, '{\'' + msg + '\':\'' + msg + '\'}', retain=True)
def sendoppomqtt(self, code, msg):
mqttc.publish(MQTT_BASE + code, '{\'' + msg + '\':\'' + OPPOMSG[code][msg] + '\'}', retain=True)
def senddirectmqtt(self, msg):
mqttc.publish(MQTT_TOPIC, msg)
def clearoppostatus(self):
self.sendmqtt('UPL', '')
self.sendmqtt('UPL', '')
self.sendmqtt('UVL', '')
self.sendmqtt('UDT', '')
self.sendmqtt('UAT', '')
self.sendmqtt('UST', '')
self.sendmqtt('UIS', '')
self.sendmqtt('U3D', '')
self.sendmqtt('UAR', '')
self.sendmqtt('UTC', '')
self.sendmqtt('UVO', '')
self.sendmqtt('USB', '')
def getmessage(self, data):
code, msg = data.split(' ', 1)
if code[0] == '@':
code = code[1:]
msg = msg.rstrip()
# Check if not debug response
if code == 'OK' or code == 'ER' or code == 'QC1' or code == 'QC2':
# Pass thru response to MQTT_TOPIC
self.senddirectmqtt(code + ' ' + msg)
else:
# Try to parse debug response
if code in OPPOMSG:
if OPPOMSG[code] != None:
if msg in OPPOMSG[code]:
# Send json parsed response
self.sendoppomqtt(code, msg)
# Clear all retained statuses if turned off
if code == 'UPW' and msg == '0':
self.clearoppostatus()
else:
if code == 'UVO':
msgs = msg.split(' ')
if len(msgs) == 2:
source = ''
output = ''
if msgs[0] in OPPOMSG[code]:
source = OPPOMSG[code][msgs[0]]
if msgs[1] in OPPOMSG[code]:
output = OPPOMSG[code][msgs[1]]
publish = '{\'' + msg + '\':\'' + 'Source: ' + source + ' - Output: ' + output + '\'}'
mqttc.publish(MQTT_BASE + code, publish, retain=False)
else:
if code == 'UTC':
msgs = msg.split(' ')
if len(msgs) == 4:
title = msgs[0]
chpt = msgs[1]
tc = msgs[2]
time = msgs[3]
if tc == 'E':
tctext = 'Total Remaining time'
elif tc == 'T':
tctext = 'Title Elapsed time'
elif tc == 'X':
tctext = 'Title Remaining time'
elif tc == 'C':
tctext = 'Chapter/track Elapsed time'
elif tc == 'K':
tctext = 'Chapter/track Remaining time'
else:
tctext = 'Unknown Time'
publish = '{\'' + msg + '\':\'' + 'Title: ' + title + ' - Chapter: ' + chpt + ' - ' + tctext + ': ' + time + '\'}'
mqttc.publish(MQTT_BASE + code, publish, retain=False)
else:
# Send json unparsed response
self.sendmqtt(code, msg)
return msg
else:
return False
def run(self):
try:
# Timeout if the no connection can be made in 5 seconds
self.s.settimeout(5)
# Allow socket address reuse
self.s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# Connect to the host over the given port
self.s.connect((self.host, self.port))
# No time out, blocking
self.s.settimeout(None)
# Initiate Verbose Mode 3
self.s.send("#SVM 3".encode())
while True:
# Wait to receive data back from server
data = self.s.recv(1024).decode('utf-8').rstrip()
# Handle multiple responses sent back from player split by \r
datas = data.split('\r')
for data in datas:
# Send raw message to MQTT_BASE/raw
mqttc.publish(MQTT_BASE + 'raw', data, retain=False)
# Parse message
message = self.getmessage(str(data))
# CLOSE THE SOCKET
self.s.close()
# If something went wrong, notify the user
except error as e:
print("ERROR: ", str(e))
def worker():
# Fork a worker process for handling incoming messages from player
new_client = Client(OPPO_HOST, OPPO_PORT)
new_client.run()
# Create a mqtt client object
mqttc = mqtt.Client()
mqttc.on_connect = on_connect
mqttc.on_message = on_message
# Set username and password if enabled
if MQTT_USER != '':
mqttc.username_pw_set(MQTT_USER, MQTT_PASS)
# Connect to mqtt server
mqttc.connect(MQTT_HOST, MQTT_PORT, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
mqttc.loop_start()
t = Thread(target=worker)
t.daemon = True
t.run()
|
coverage_test.py
|
from queue import Queue
import random
import socket
import threading
import unittest
from coapclient import HelperClient
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.message import Message
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Tests(unittest.TestCase):
def setUp(self):
self.server_address = ("127.0.0.1", 5683)
self.current_mid = random.randint(1, 1000)
self.server_mid = random.randint(1000, 2000)
self.server = CoAPServer("127.0.0.1", 5683)
self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
self.server_thread.start()
self.queue = Queue()
def tearDown(self):
self.server.close()
self.server_thread.join(timeout=25)
self.server = None
def _test_with_client(self, message_list): # pragma: no cover
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
received_message = client.send_request(message)
if expected is not None:
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options:
self.assertEqual(len(received_message.options), len(expected.options))
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def _test_with_client_observe(self, message_list): # pragma: no cover
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
client.send_request(message, self.client_callback)
if expected is not None:
received_message = self.queue.get()
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options:
self.assertEqual(len(received_message.options), len(expected.options))
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def client_callback(self, response):
print("Callback")
self.queue.put(response)
def _test_plugtest(self, message_list): # pragma: no cover
serializer = Serializer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for message, expected in message_list:
if message is not None:
datagram = serializer.serialize(message)
sock.sendto(datagram, message.destination)
if expected is not None:
datagram, source = sock.recvfrom(4096)
received_message = serializer.deserialize(datagram, source)
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, source)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
sock.close()
def _test_datagram(self, message_list): # pragma: no cover
serializer = Serializer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for message, expected in message_list:
if message is not None:
datagram, destination = message
sock.sendto(datagram, destination)
if expected is not None:
datagram, source = sock.recvfrom(4096)
received_message = serializer.deserialize(datagram, source)
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, source)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
sock.close()
def test_not_allowed(self):
print("TEST_NOT_ALLOWED")
path = "/void"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange4 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4])
# def test_separate(self):
# print "TEST_SEPARATE"
# path = "/separate"
# req = Request()
# req.code = defines.Codes.GET.number
# req.uri_path = path
# req.type = defines.Types["CON"]
# req._mid = self.current_mid
# req.destination = self.server_address
#
# expected = Response()
# expected.type = defines.Types["CON"]
# expected._mid = None
# expected.code = defines.Codes.CONTENT.number
# expected.token = None
# expected.max_age = 60
#
# exchange1 = (req, expected)
#
# self.current_mid += 1
#
# req = Request()
# req.code = defines.Codes.POST.number
# req.uri_path = path
# req.type = defines.Types["CON"]
# req._mid = self.current_mid
# req.destination = self.server_address
# req.payload = "POST"
#
# expected = Response()
# expected.type = defines.Types["CON"]
# expected._mid = None
# expected.code = defines.Codes.CHANGED.number
# expected.token = None
# expected.options = None
#
# exchange2 = (req, expected)
#
# self.current_mid += 1
#
# req = Request()
# req.code = defines.Codes.PUT.number
# req.uri_path = path
# req.type = defines.Types["CON"]
# req._mid = self.current_mid
# req.destination = self.server_address
# req.payload = "PUT"
#
# expected = Response()
# expected.type = defines.Types["CON"]
# expected._mid = None
# expected.code = defines.Codes.CHANGED.number
# expected.token = None
# expected.options = None
#
# exchange3 = (req, expected)
#
# self.current_mid += 1
#
# req = Request()
# req.code = defines.Codes.DELETE.number
# req.uri_path = path
# req.type = defines.Types["CON"]
# req._mid = self.current_mid
# req.destination = self.server_address
#
# expected = Response()
# expected.type = defines.Types["CON"]
# expected._mid = None
# expected.code = defines.Codes.DELETED.number
# expected.token = None
#
# exchange4 = (req, expected)
#
# self.current_mid += 1
# self._test_with_client([exchange1, exchange2, exchange3, exchange4])
def test_post(self):
print("TEST_POST")
path = "/storage/new_res?id=1"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "test"
req.add_if_none_match()
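        # If-None-Match (RFC 7252, section 5.10.8.2) makes the request conditional on the target
        # resource not existing yet, so this first POST is expected to succeed with 2.01 Created.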
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
expected.location_query = "id=1"
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = "/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.if_match = ["test", "not"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "test"
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = "/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.if_match = ["not"]
req.payload = "not"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.PRECONDITION_FAILED.number
expected.token = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = "/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.if_match = ["not"]
req.payload = "not"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.PRECONDITION_FAILED.number
expected.token = None
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = "/storage/new_res"
req._mid = self.current_mid
req.destination = self.server_address
req.add_if_none_match()
req.payload = "not"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.PRECONDITION_FAILED.number
expected.token = None
exchange5 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4, exchange5])
def test_post_block(self):
print("TEST_POST_BLOCK")
path = "/storage/new_res"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
"Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
"Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
"Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
"nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
"ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
"ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
"facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
"sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
"Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
" urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
" Praesent tristique turpis dui, at ultri"
req.block1 = (1, 1, 1024)
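        # CoAPthon's block1 helper appears to take a (block number, more flag, block size) tuple;
        # starting the Block1 transfer at NUM=1 instead of 0 is deliberately out of sequence,
        # which is why a 4.08 Request Entity Incomplete response is expected here.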
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
expected.token = None
expected.payload = None
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
"Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
"Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
"Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
"nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
"ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
"ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
"facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
"sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
"Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
" urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
" Praesent tristique turpis dui, at ultri"
req.block1 = (0, 1, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (0, 1, 1024)
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
req.block1 = (1, 1, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (1, 1, 64)
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
req.block1 = (3, 1, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
expected.token = None
expected.payload = None
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
req.block1 = (2, 0, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
exchange5 = (req, expected)
self.current_mid += 1
self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5])
def test_get_block(self):
print("TEST_GET_BLOCK")
path = "/big"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (0, 0, 512)
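        # Block2 (0, 0, 512) requests block number 0 with a 512-byte block size; the expected
        # Block2 (0, 1, 512) in the response sets the "more" flag, i.e. further blocks follow (RFC 7959).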
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (0, 1, 512)
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (1, 0, 256)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (1, 1, 256)
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (2, 0, 128)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (2, 1, 128)
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (3, 0, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (3, 1, 64)
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (4, 0, 32)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (4, 1, 32)
exchange5 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (5, 0, 16)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (5, 1, 16)
exchange6 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (6, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (6, 1, 1024)
exchange7 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = None
req.block2 = (7, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (7, 0, 1024)
exchange8 = (req, expected)
self.current_mid += 1
self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5, exchange6, exchange7, exchange8])
def test_post_block_big(self):
print("TEST_POST_BLOCK_BIG")
path = "/big"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "Lorem ipsum dolo"
req.block1 = (0, 1, 16)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (0, 1, 16)
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "r sit amet, consectetur adipisci"
req.block1 = (1, 1, 32)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (1, 1, 32)
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "ng elit. Sed ut ultrices ligula. Pellentesque purus augue, cursu"
req.block1 = (2, 1, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (2, 1, 64)
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "s ultricies est in, vehicula congue metus. Vestibulum vel justo lacinia, porttitor quam vitae, " \
"feugiat sapien. Quisque finibus, "
req.block1 = (3, 1, 128)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (3, 1, 128)
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "nisi vitae rhoncus malesuada, augue mauris dapibus tellus, sit amet venenatis libero" \
" libero sed lorem. In pharetra turpis sed eros porta mollis. Quisque dictum dolor nisl," \
" imperdiet tincidunt augue malesuada vitae. Donec non felis urna. Suspendisse at hend"
req.block1 = (4, 1, 256)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (4, 1, 256)
exchange5 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "rerit ex, quis aliquet ante. Vivamus ultrices dolor at elit tincidunt, eget fringilla " \
"ligula vestibulum. In molestie sagittis nibh, ut efficitur tellus faucibus non. Maecenas " \
"posuere elementum faucibus. Morbi nisi diam, molestie non feugiat et, elementum eget magna." \
" Donec vel sem facilisis quam viverra ultrices nec eu lacus. Sed molestie nisi id ultrices " \
"interdum. Curabitur pharetra sed tellus in dignissim. Duis placerat aliquam metus, volutpat " \
"elementum augue aliquam a. Nunc sed dolor at orci maximus portt"
req.block1 = (5, 1, 512)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (5, 1, 512)
exchange6 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "itor ac sit amet eros. Mauris et nisi in tortor pharetra rhoncus sit amet hendrerit metus. " \
"Integer laoreet placerat cursus. Nam a nulla ex. Donec laoreet sagittis libero quis " \
"imperdiet. Vivamus facilisis turpis nec rhoncus venenatis. Duis pulvinar tellus vel quam " \
"maximus imperdiet. Mauris eget nibh orci. Duis ut cursus nibh. Nulla sed commodo elit. " \
"Suspendisse ac eros lacinia, mattis turpis at, porttitor justo. Vivamus molestie " \
"tincidunt libero. Etiam porttitor lacus odio, at lobortis tortor scelerisque nec. " \
"Nullam non ante vel nisi ultrices consectetur. Maecenas massa felis, tempor eget " \
"malesuada eget, pretium eu sapien. Vivamus dapibus ante erat, non faucibus orci sodales " \
"sit amet. Cras magna felis, sodales eget magna sed, eleifend rutrum ligula. Vivamus interdum " \
"enim enim, eu facilisis tortor dignissim quis. Ut metus nulla, mattis non lorem et, " \
"elementum ultrices orci. Quisque eleifend, arcu vitae ullamcorper pulvinar, ipsum ex " \
"sodales arcu, eget consectetur mauris metus ac tortor. Donec id sem felis. Maur"
req.block1 = (6, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
expected.location_path = "big"
exchange7 = (req, expected)
self.current_mid += 1
self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5, exchange6, exchange7])
def test_options(self):
print("TEST_OPTIONS")
path = "/storage/new_res"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
option = Option()
option.number = defines.OptionRegistry.ETAG.number
option.value = "test"
req.add_option(option)
req.del_option(option)
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
option = Option()
option.number = defines.OptionRegistry.ETAG.number
option.value = "test"
req.add_option(option)
req.del_option_by_name("ETag")
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
option = Option()
option.number = defines.OptionRegistry.ETAG.number
option.value = "test"
req.add_option(option)
del req.etag
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
exchange3 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3])
# def test_long_options(self):
# """
# Test processing of options with extended length
# """
# print("TEST_LONG_OPTIONS")
#
# path = "/storage/"
# req = Request()
# req.code = defines.Codes.GET.number
# req.uri_path = path
# req.type = defines.Types["CON"]
# req._mid = self.current_mid
# req.destination = self.server_address
# option = Option()
# # This option should be silently ignored by the server
# # since it is not critical
# option.number = defines.OptionRegistry.RM_MESSAGE_SWITCHING.number
# option.value = "\1\1\1\1\0\0"
# options = req.options
# req.add_option(option)
# req.payload = "test"
#
# expected = Response()
# expected.type = defines.Types["ACK"]
# expected.code = defines.Codes.CONTENT.number
# expected.token = None
# expected.payload = None
#
# exchange1 = (req, expected)
# self.current_mid += 1
#
# self._test_with_client([exchange1])
#
# # This option (244) should be silently ignored by the server
# req = ("\x40\x01\x01\x01\xd6\xe7\x01\x01\x01\x01\x00\x00", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["ACK"]
# expected._mid = None
# expected.code = defines.Codes.NOT_FOUND.number
# expected.token = None
# expected.payload = None
#
# exchange21 = (req, expected)
# self.current_mid += 1
#
# # This option (245) should cause BAD REQUEST, as unrecognizable critical
# req = ("\x40\x01\x01\x01\xd6\xe8\x01\x01\x01\x01\x00\x00", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange22 = (req, expected)
# self.current_mid += 1
#
# # This option (65525) should cause BAD REQUEST, as unrecognizable critical
# req = ("\x40\x01\x01\x01\xe6\xfe\xe8\x01\x01\x01\x01\x00\x00", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange23 = (req, expected)
# self.current_mid += 1
#
# self._test_datagram([exchange21, exchange22, exchange23])
def test_content_type(self):
print("TEST_CONTENT_TYPE")
path = "/storage/new_res"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "<value>test</value>"
req.content_type = defines.Content_types["application/xml"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Basic Resource"
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "test"
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.accept = defines.Content_types["application/xml"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "<value>test</value>"
expected.content_type = defines.Content_types["application/xml"]
exchange5 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.accept = defines.Content_types["application/json"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_ACCEPTABLE.number
expected.token = None
expected.payload = None
exchange6 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = "/xml"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "<value>0</value>"
        print(expected.pretty_print())
exchange7 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = "/encoding"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "0"
exchange8 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = "/encoding"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.accept = defines.Content_types["application/xml"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "<value>0</value>"
expected.content_type = defines.Content_types["application/xml"]
exchange9 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = "/encoding"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.accept = defines.Content_types["application/json"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "{'value': '0'}"
expected.content_type = defines.Content_types["application/json"]
exchange10 = (req, expected)
self.current_mid += 1
# self._test_with_client([exchange1, exchange2, exchange3, exchange4, exchange5, exchange6, exchange7,
# exchange8, exchange9, exchange10])
self._test_with_client([exchange1, exchange2, exchange3, exchange4, exchange5])
def test_ETAG(self):
print("TEST_ETAG")
path = "/etag"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "ETag resource"
expected.etag = bytes("0", "utf-8")
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
expected.location_path = path
expected.etag = "1"
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.etag = bytes("1", "utf-8")
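        # Supplying the resource's current ETag with a GET makes it a validation request; a matching
        # ETag is expected to yield a 2.03 Valid response (RFC 7252, section 5.10.6).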
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.VALID.number
expected.token = None
expected.payload = "test"
expected.etag = bytes("1", "utf-8")
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "echo payload"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
exchange4 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4])
def test_child(self):
print("TEST_CHILD")
path = "/child"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = path
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "test"
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "testPUT"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.DELETED.number
expected.token = None
expected.payload = None
exchange4 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4])
def test_not_found(self):
print("TEST_not_found")
path = "/not_found"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.token = 100
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = "100"
expected.payload = None
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "test"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "testPUT"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = None
expected.payload = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = None
expected.payload = None
exchange4 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4])
# def test_invalid(self):
# print("TEST_INVALID")
#
# # version
# req = ("\x00\x01\x8c\xda", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange1 = (req, expected)
#
# # version
# req = ("\x40", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange2 = (req, expected)
#
# # code
# req = ("\x40\x05\x8c\xda", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange3 = (req, expected)
#
# # option
# req = ("\x40\x01\x8c\xda\x94", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange4 = (req, expected)
#
# # payload marker
# req = ("\x40\x02\x8c\xda\x75\x62\x61\x73\x69\x63\xff", self.server_address)
#
# expected = Response()
# expected.type = defines.Types["RST"]
# expected._mid = None
# expected.code = defines.Codes.BAD_REQUEST.number
#
# exchange5 = (req, expected)
#
# self._test_datagram([exchange1, exchange2, exchange3, exchange4, exchange5])
def test_post_block_big_client(self):
print("TEST_POST_BLOCK_BIG_CLIENT")
path = "/big"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
"Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
"Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
"Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
"nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
"ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
"ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
"facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
"sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
"Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
" urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
" Praesent tristique turpis dui, at ultricies lorem fermentum at. Vivamus sit amet ornare neque, " \
"a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum." \
"Vivamus ut odio ac odio malesuada accumsan. Aenean vehicula diam at tempus ornare. Phasellus " \
"dictum mauris a mi consequat, vitae mattis nulla fringilla. Ut laoreet tellus in nisl efficitur," \
" a luctus justo tempus. Fusce finibus libero eget velit finibus iaculis. Morbi rhoncus purus " \
"vel vestibulum ullamcorper. Sed ac metus in urna fermentum feugiat. Nulla nunc diam, sodales " \
"aliquam mi id, varius porta nisl. Praesent vel nibh ac turpis rutrum laoreet at non odio. " \
"Phasellus ut posuere mi. Suspendisse malesuada velit nec mauris convallis porta. Vivamus " \
"sed ultrices sapien, at cras amet."
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
exchange1 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1])
def test_observe_client(self):
print("TEST_OBSERVE_CLIENT")
path = "/basic"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.observe = 0
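        # Observe = 0 in a GET registers the client for notifications (RFC 7641); the Observe value
        # in responses is a notification sequence number chosen by the server (the test expects 1).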
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.observe = 1
exchange1 = (req, expected)
req = Message()
req.code = defines.Codes.EMPTY.number
req.uri_path = path
req.type = defines.Types["RST"]
req._mid = self.current_mid
req.destination = self.server_address
exchange2 = (req, None)
self.current_mid += 1
self._test_with_client_observe([exchange1, exchange2])
if __name__ == '__main__':
unittest.main()
|
board.py
|
from connection import SerialConnection, TelnetConnection, ConnectionError
from fileops import set_fileops_params
from autobool import AutoBool
from printing import dprint, eprint, qprint
import printing
from threading import Thread
import time
import inspect
import traceback
import os
from blessed import Terminal
QUIT_REPL_CHAR = 'X'
QUIT_REPL_BYTE = bytes((ord(QUIT_REPL_CHAR) - ord('@'),)) # Control-X
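# Subtracting ord('@') (0x40) maps an upper-case letter to its control code:
# 'X' is 0x58, so QUIT_REPL_BYTE ends up as b'\x18', the Ctrl-X byte.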
class BoardError(Exception):
"""Errors relating to board connections"""
def __init__(self, msg):
super().__init__(msg)
class Board(object):
"""Serial or telnet connection to a MicroPython REPL"""
STATUS_UNKNOWN = 1
STATUS_RAW_REPL = 2
STATUS_NORMAL_REPL = 3
def __init__(self, config):
"""New instance. Call open_XXX to establish connection."""
self._config = config
self._serial = None
self._id = None
self._has_buffer = False
self._root_dirs = []
# repl status (raw/normal/unknown)
self._status = self.STATUS_UNKNOWN
###################################################################
# connection
def connect_serial(self, port, baudrate):
"""Connect to board via serial connection"""
self._serial = SerialConnection(port, baudrate)
self._board_characteristics()
if not self.connected:
raise BoardError("Failed to establish connection to board at '{}'".format(port))
def connect_telnet(self, ip, user, password):
"""Connect via telnet"""
self._serial = TelnetConnection(ip, user, password)
self._board_characteristics()
if not self.connected:
raise BoardError("Failed to establish connection to board at '{}'".format(ip))
def _board_characteristics(self):
"""Get device id and other updates"""
# get unique board id
self._id = self.remote_eval(get_unique_id, 'BOARD HAS NO ID')
qprint("Connected to '{}' (id={}) ...".format(self.name, self.id), end='', flush=True)
# check buffer
self._has_buffer = self.remote_eval(test_buffer)
qprint(" has_buffer={}".format(self._has_buffer), end='', flush=True)
if self._serial.is_circuit_python:
qprint()
else:
# get root dirs
qprint("{} dirs=".format(self._has_buffer), end='', flush=True)
self._root_dirs = ['/{}/'.format(dir) for dir in self.remote_eval(listroot)]
qprint(self._root_dirs, end='', flush=True)
if not self.get_config('mac'):
qprint(" mac=", end='', flush=True)
self.set_config('mac', self.remote_eval(get_mac_address))
qprint(self.get_config('mac'), end='', flush=True)
# sync time
now = time.localtime(time.time())
qprint(" sync time ...")
self.remote(set_time, now.tm_year, now.tm_mon, now.tm_mday,
now.tm_hour, now.tm_min, now.tm_sec)
qprint()
def disconnect(self):
"""Disconnect and release port / ip"""
dprint("Disconnecting board", self._id)
self._id = None
self._root_dirs = []
if self._serial:
self._serial.close()
self._serial = None
@property
def connected(self):
"""Connected to a MicroPython REPL"""
return self._serial and self._serial.connected
###################################################################
# id & configuration
@property
def id(self):
"""Unique id of this board"""
if not self._id:
raise BoardError("board has no id")
return self._id
@property
def address(self):
"""Board serial port or ip address"""
return self._serial.address
def match(self, spec):
"""Board matches spec: id, name, port, ip address or URL"""
if self.id == spec: return True
if self.name == spec: return True
return self._serial.match(spec)
@property
def is_telnet(self):
"""Board is connected via telnet"""
return self._serial.is_telnet
@property
def name(self):
"""Get board name"""
return self.get_config('name', 'py')
@property
def name_path(self):
"""Path prefix for this board."""
return '/{}/'.format(self.name)
@name.setter
def name(self, name):
"""Set board name. Note: stored only locally, not uploaded to board!"""
self.set_config('name', name)
def get_config(self, option, default=None):
"""Get configuration value"""
return self._config.get(self._id, option, default)
def set_config(self, option, value):
"""Set configuration value"""
self._config.set(self._id, option, value)
def remove_config_option(self, option):
"""Remove option from board config"""
self._config.remove(self._id, option)
def config_options(self):
"""List of all options of this board"""
return self._config.options(self._id)
def config_string(self):
return self._config.config_string(self._id)
def is_root_path(self, filename):
"""Determines if 'filename' corresponds to a directory on this device."""
test_filename = filename + '/'
for root_dir in self._root_dirs:
if test_filename.startswith(root_dir):
return True
return False
@property
def root_dirs(self):
"""List of root directories on this board."""
return self._root_dirs
@property
def has_buffer(self):
"""Board upy io has buffer"""
return self._has_buffer
def write(self, bytes):
"""Send bytes to board"""
self._serial.write(bytes)
def read(self, len):
"""Read bytes from board"""
return self._serial.read(len)
###################################################################
# repl and remote execution
@property
def repl_status(self):
"""Board repl status"""
return self._status
def enter_raw_repl(self):
"""Enter raw repl if not already in this mode."""
if self._serial.is_circuit_python:
self.enter_raw_repl_cp()
else:
self.enter_raw_repl_mp()
def enter_raw_repl_cp(self):
"""Enter raw repl if not already in this mode for CIRCUITPYTHON."""
# Ctrl-C twice: interrupt any running program
dprint("^C, abort running program")
self._serial.write(b'\r\x03\x03')
# Ctrl-A: enter raw REPL
dprint("^A, raw repl")
self._serial.write(b'\r\x01')
expect = b"raw REPL; CTRL-B to exit"
data = self._serial.read_until(1, expect)
if not data.endswith(expect):
raise BoardError('Cannot enter raw repl: expected {}, got {}'.format(expect, data))
expect = b"\r\n"
data = self._serial.read_until(1, expect)
if not data.endswith(expect):
raise BoardError('Cannot enter raw repl: expected {}, got {}'.format(expect, data))
def enter_raw_repl_mp(self):
"""Enter raw repl if not already in this mode for MICROPYTHON."""
dprint("^B^C, abort running program")
self._serial.write(b'\r\x02\x03')
time.sleep(.1)
# Attempt to get to REPL prompt, send Ctrl-C on failure.
expect = b'> '
abort = True
for attempt in range(3):
try:
self._serial.read_until(1, expect)
abort = False
break
except ConnectionError as err:
dprint('ConnectionError: {0}'.format(err))
self._serial.write(b'\x03')
time.sleep(1)
# Kickout if 3rd attempt fails
if abort:
raise ConnectionError('Failed to enter raw REPL')
time.sleep(.1)
# Ctrl-A: enter raw REPL
dprint("^A, raw repl")
self._serial.write(b'\r\x01')
expect = b'raw REPL; CTRL-B to exit\r\n'
data = self._serial.read_until(1, expect)
if not data.endswith(expect):
raise BoardError('Cannot enter raw repl: expected {}, got {}'.format(expect, data))
# determine required steps
# Note 1: no soft reset breaks telnet connection
# Note 2: if user pressed reset button, mode status is STATUS_NORMAL_REPL
# but shell49 won't know it. Hence we cannot assume RAW_REPL.
# BUT soft reset is not required.
if self.is_telnet or self._status == self.STATUS_RAW_REPL:
dprint("enter_raw_repl: already in RAW REPL state, no action")
return
# Ctrl-D: soft reset
dprint("^D, soft reset")
self._serial.write(b'\x04')
expect = b'soft reboot\r\n'
data = self._serial.read_until(1, expect)
if not data.endswith(expect):
raise BoardError('Could not do soft reset: expected {}, got {}'.format(expect, data))
# By splitting this into 2 reads, it allows boot.py to print stuff,
# which will show up after the soft reboot and before the raw REPL.
# The next read_until takes ~0.8 seconds (on ESP32)
expect = b'raw REPL; CTRL-B to exit\r\n'
data = self._serial.read_until(1, expect)
if not data.endswith(expect):
raise BoardError('Soft reset failed: expected {}, got {}'.format(expect, data))
# update board status
self._status = self.STATUS_RAW_REPL
dprint("in raw repl")
def exit_raw_repl(self):
"""Enter friendly (normal) repl."""
# Ctrl-B: enter friendly REPL
self._serial.write(b'\r\x02')
self._status = self.STATUS_NORMAL_REPL
def _exec_no_output(self, cmd, data_consumer=None, timeout=10):
"""Send command (string or bytes) to board for execution.
Pass board output to data_consumer (e.g. print).
no_output ... won't get execution output."""
if isinstance(cmd, str):
cmd = bytes(cmd, encoding='utf-8')
dprint()
dprint("_exec_no_output:", cmd.decode('utf-8')[:20])
# enter raw repl (if needed) and check if we have a prompt
self.enter_raw_repl()
dprint("wait for >")
data = self._serial.read_until(1, b'>', timeout=1)
if not data.endswith(b'>'):
raise BoardError("Cannot get response from board")
# send command to board
for i in range(0, len(cmd), 256):
self._serial.write(cmd[i:min(i + 256, len(cmd))])
time.sleep(0.01)
# execute command
self._serial.write(b'\x04')
# check if successful
if self._serial.read(2) != b'OK':
self._status = self.STATUS_UNKNOWN
raise BoardError("Could not exec '{} ...'".format(cmd.decode('utf-8').partition('\n')[0]))
def _exec_output(self, data_consumer=None, timeout=10):
"""Read output after exec_no_output"""
# self._serial.write(b'123\r')
data = self._serial.read_until(1, b'\x04', timeout=timeout, data_consumer=data_consumer)
if not data.endswith(b'\x04'):
raise BoardError('_exec_output expected 1st EOF, got "{}"'.format(data))
data = data[:-1]
# wait for error output
data_err = self._serial.read_until(1, b'\x04', timeout=timeout)
if not data_err.endswith(b'\x04'):
            raise BoardError('_exec_output expected 2nd EOF, got "{}"'.format(data_err))
data_err = data_err[:-1]
if data_err:
self._status = self.STATUS_UNKNOWN
raise BoardError("Exec -> {}".format(data_err.decode('utf-8')))
# return result
return data
def exec(self, cmd, *, data_consumer=None, timeout=10):
"""Send cmd (str or bytes) to board for execution and return result."""
try:
self._exec_no_output(cmd, data_consumer, timeout)
return self._exec_output(data_consumer, timeout)
except ConnectionError:
self.disconnect()
raise
def execfile(self, filename, *, data_consumer=None, timeout=10):
"""Exec file on remote board return results.
Also passes output to data_consumer as they become available.
Timeout None disables timeout.
"""
with open(os.path.expanduser(filename), 'rb') as f:
cmds = f.read()
try:
            return self.exec(cmds, data_consumer=data_consumer, timeout=timeout)
finally:
self._status = self.STATUS_UNKNOWN
###################################################################
# remote function call
def _remote_repr(self, i):
"""Helper function to deal with types which we can't send to the pyboard."""
repr_str = repr(i)
if repr_str and repr_str[0] == '<':
return 'None'
return repr_str
def remote(self, func, *args, xfer_func=None, **kwargs):
"""Call func with args on the micropython board."""
has_buffer = self._has_buffer
buffer_size = self.get_config('buffer_size', default=128)
time_offset = self.get_config('time_offset', default=946684800)
set_fileops_params(has_buffer, buffer_size, time_offset)
args_arr = [self._remote_repr(i) for i in args]
kwargs_arr = ["{}={}".format(k, self._remote_repr(v)) for k, v in kwargs.items()]
func_str = inspect.getsource(func)
func_str += 'output = ' + func.__name__ + '('
func_str += ', '.join(args_arr + kwargs_arr)
func_str += ')\n'
func_str += 'if output is None:\n'
func_str += ' print("None")\n'
func_str += 'else:\n'
func_str += ' print(output)\n'
func_str = func_str.replace('TIME_OFFSET', '{}'.format(time_offset))
func_str = func_str.replace('HAS_BUFFER', '{}'.format(has_buffer))
func_str = func_str.replace('BUFFER_SIZE', '{}'.format(buffer_size))
func_str = func_str.replace('IS_UPY', 'True')
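        # The helper function's own source is what runs on the board: inspect.getsource() grabs the
        # function text, a call with repr()'d arguments is appended, and the TIME_OFFSET / HAS_BUFFER /
        # BUFFER_SIZE / IS_UPY placeholders are substituted textually before the snippet is executed
        # in the raw REPL.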
start_time = time.time()
        self._exec_no_output(func_str)
if xfer_func:
xfer_func(self, *args, **kwargs)
output = self._exec_output()
dprint("remote: {}({}) --> {}, in {:.3} s)".format(
func.__name__,
repr(args)[1:-1],
output,
time.time()-start_time))
return output
def remote_eval(self, func, *args, **kwargs):
"""Calls func with the indicated args on the micropython board, and
converts the response back into python by using eval.
"""
res = self.remote(func, *args, **kwargs)
try:
return eval(res)
except (SyntaxError, ValueError) as e:
eprint("*** remote_eval({}, {}, {}) -> \n{} is not valid python code".format(
func.__name__, args, kwargs, res.decode('utf-8')))
return None
###################################################################
# repl
def _repl_serial(self, serial_ok):
"""Thread, copies bytes from serial to out"""
term = Terminal()
try:
with serial_ok, term.raw():
save_timeout = self._serial.timeout
# Set a timeout so that the read returns periodically with no data
# and allows us to check whether the main thread wants us to quit.
self._serial.timeout = 0.4
while not self._quit_serial_reader:
char = self._serial.read(1)
if char.decode('utf-8') != '':
print(char.decode('utf-8'), end='', flush=True)
self._serial.timeout = save_timeout
except ConnectionError as e:
self.disconnect()
print('\r')
eprint(str(e).replace('\n', '\r'))
except Exception:
# catchall, print error traceback
from io import StringIO
s = StringIO()
print('\r', printing.ERR_COLOR)
traceback.print_exc(file=s)
eprint(s.getvalue().replace('\n', '\r'))
def repl(self, getch):
self.exit_raw_repl()
# who knows what state we are in after repl?
serial_ok = AutoBool()
self._quit_serial_reader = False
repl_thread = Thread(target=self._repl_serial, args=[serial_ok], name="REPL")
repl_thread.daemon = True
repl_thread.start()
# wait for reader to start
while not serial_ok(): pass
try:
# Wake up the prompt
self._serial.write(b'\r')
while serial_ok():
char = getch()
if not char: continue
if char == QUIT_REPL_BYTE:
self._quit_serial_reader = True
# needed by some boards, e.g. WiPy
self._serial.write(b' ')
# wait for reader thread to notice
time.sleep(0.5)
# print newline so the shell49 prompt looks good
print('\n')
# stay in the loop until the reader thread is quitting
continue
if char == b'\n':
char = b'\r'
self._serial.write(char)
except (AttributeError, BoardError):
# Board no longer present?
self.disconnect()
print('\n')
###################################################################
# remote operations, these run on the uPy board
def get_unique_id(default):
"""Inquire the boards unique id."""
try:
from microcontroller import cpu
from binascii import hexlify
uid = hexlify(cpu.uid).decode('ascii')
except:
try:
from machine import unique_id
from binascii import hexlify
uid = hexlify(unique_id()).decode('ascii')
except:
uid = default
return repr(uid)
def listroot():
"""Return list of filenames contained in root directory."""
import os
return os.listdir('/')
def get_mac_address():
try:
from binascii import hexlify
from network import WLAN, STA_IF
mac = hexlify(WLAN(STA_IF).config('mac'), ':').decode('ascii')
except:
mac = None
return repr(mac)
def set_time(y, m, d, h, min, s):
"""Set time on upy board."""
rtc = None
try:
import pyb
rtc = pyb.RTC()
rtc.datetime((y, m, d, None, h, min, s))
return rtc.datetime()
except:
try:
import machine
rtc = machine.RTC()
if not rtc.synced():
try:
rtc.datetime((y, m, d, None, h, min, s))
return rtc.datetime()
except:
rtc.init((y, m, d, h, min, s))
return rtc.now()
except:
return None
def test_buffer():
"""Check micropython firmware to see if sys.stdin.buffer exists."""
import sys
try:
        return sys.stdin.buffer is not None
except:
return False
###################################################################
# test code
def data_consumer(bytes):
print(bytes.decode('utf-8'), end='')
def putch(byte):
print(byte.decode('utf-8'), end='', flush=True)
def main():
from . config import Config
from . timeit import Timeit
port = "/dev/cu.SLAB_USBtoUART"
baudrate = 115200
config = Config('~/Dropbox/Files/Class/49/.shell49_rc.py')
with Timeit() as t:
board = Board(config)
board.connect_serial(port, baudrate)
print("*** Connecting to board took {:0.3f} seconds".format(t.interval))
print(board.exec("print(2**100)", data_consumer=data_consumer).decode('utf-8'))
print(board.remote(get_unique_id, "no_id"))
if False:
from . getch import getch
        board.repl(getch)
print(board.exec("print(2**30)").decode('utf-8'))
print(board.remote(get_unique_id, "no_id"))
print(board.exec("print(2**80)").decode('utf-8'))
|
_client_application.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example gRPC Python-using client-side application."""
import collections
import enum
import threading
import time
import grpc
from tests.unit.framework.common import test_constants
from tests.testing.proto import requests_pb2
from tests.testing.proto import services_pb2
from tests.testing.proto import services_pb2_grpc
from tests.testing import _application_common
@enum.unique
class Scenario(enum.Enum):
UNARY_UNARY = 'unary unary'
UNARY_STREAM = 'unary stream'
STREAM_UNARY = 'stream unary'
STREAM_STREAM = 'stream stream'
CONCURRENT_STREAM_UNARY = 'concurrent stream unary'
CONCURRENT_STREAM_STREAM = 'concurrent stream stream'
CANCEL_UNARY_UNARY = 'cancel unary unary'
CANCEL_UNARY_STREAM = 'cancel unary stream'
INFINITE_REQUEST_STREAM = 'infinite request stream'
class Outcome(collections.namedtuple('Outcome', ('kind', 'code', 'details'))):
"""Outcome of a client application scenario.
Attributes:
kind: A Kind value describing the overall kind of scenario execution.
code: A grpc.StatusCode value. Only valid if kind is Kind.RPC_ERROR.
details: A status details string. Only valid if kind is Kind.RPC_ERROR.
"""
@enum.unique
class Kind(enum.Enum):
SATISFACTORY = 'satisfactory'
UNSATISFACTORY = 'unsatisfactory'
RPC_ERROR = 'rpc error'
_SATISFACTORY_OUTCOME = Outcome(Outcome.Kind.SATISFACTORY, None, None)
_UNSATISFACTORY_OUTCOME = Outcome(Outcome.Kind.UNSATISFACTORY, None, None)
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def _next(self):
with self._condition:
while True:
if self._values:
return self._values.pop(0)
elif not self._open:
raise StopIteration()
else:
self._condition.wait()
def __next__(self): # (Python 3 Iterator Protocol)
return self._next()
def next(self): # (Python 2 Iterator Protocol)
return self._next()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def close(self):
with self._condition:
self._open = False
self._condition.notify_all()
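# _Pipe doubles as a request iterator that other threads can feed while an RPC
# is in flight; add() unblocks the consuming iterator and close() terminates
# it. A minimal sketch of that use (names mirror _run_stream_stream below):
#
#   pipe = _Pipe()
#   responses = stub.StreStre(iter(pipe))
#   pipe.add(request)        # produces the next request on the stream
#   pipe.close()             # ends the request stream via StopIteration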
def _run_unary_unary(stub):
response = stub.UnUn(_application_common.UNARY_UNARY_REQUEST)
if _application_common.UNARY_UNARY_RESPONSE == response:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_unary_stream(stub):
response_iterator = stub.UnStre(_application_common.UNARY_STREAM_REQUEST)
try:
next(response_iterator)
except StopIteration:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_stream_unary(stub):
response, call = stub.StreUn.with_call(
iter((_application_common.STREAM_UNARY_REQUEST,) * 3))
if (_application_common.STREAM_UNARY_RESPONSE == response and
call.code() is grpc.StatusCode.OK):
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_stream_stream(stub):
request_pipe = _Pipe()
response_iterator = stub.StreStre(iter(request_pipe))
request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
first_responses = next(response_iterator), next(response_iterator)
request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
second_responses = next(response_iterator), next(response_iterator)
request_pipe.close()
try:
next(response_iterator)
except StopIteration:
unexpected_extra_response = False
else:
unexpected_extra_response = True
if (first_responses == _application_common.TWO_STREAM_STREAM_RESPONSES and
second_responses == _application_common.TWO_STREAM_STREAM_RESPONSES
and not unexpected_extra_response):
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_concurrent_stream_unary(stub):
future_calls = tuple(
stub.StreUn.future(iter((_application_common.STREAM_UNARY_REQUEST,) *
3))
for _ in range(test_constants.THREAD_CONCURRENCY))
for future_call in future_calls:
if future_call.code() is grpc.StatusCode.OK:
response = future_call.result()
if _application_common.STREAM_UNARY_RESPONSE != response:
return _UNSATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
else:
return _SATISFACTORY_OUTCOME
def _run_concurrent_stream_stream(stub):
condition = threading.Condition()
outcomes = [None] * test_constants.RPC_CONCURRENCY
def run_stream_stream(index):
outcome = _run_stream_stream(stub)
with condition:
outcomes[index] = outcome
condition.notify()
for index in range(test_constants.RPC_CONCURRENCY):
thread = threading.Thread(target=run_stream_stream, args=(index,))
thread.start()
with condition:
while True:
if all(outcomes):
for outcome in outcomes:
if outcome.kind is not Outcome.Kind.SATISFACTORY:
return _UNSATISFACTORY_OUTCOME
else:
return _SATISFACTORY_OUTCOME
else:
condition.wait()
def _run_cancel_unary_unary(stub):
response_future_call = stub.UnUn.future(
_application_common.UNARY_UNARY_REQUEST)
initial_metadata = response_future_call.initial_metadata()
cancelled = response_future_call.cancel()
if initial_metadata is not None and cancelled:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_infinite_request_stream(stub):
def infinite_request_iterator():
while True:
yield _application_common.STREAM_UNARY_REQUEST
response_future_call = stub.StreUn.future(
infinite_request_iterator(),
timeout=_application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
if response_future_call.code() is grpc.StatusCode.DEADLINE_EXCEEDED:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
_IMPLEMENTATIONS = {
Scenario.UNARY_UNARY: _run_unary_unary,
Scenario.UNARY_STREAM: _run_unary_stream,
Scenario.STREAM_UNARY: _run_stream_unary,
Scenario.STREAM_STREAM: _run_stream_stream,
Scenario.CONCURRENT_STREAM_UNARY: _run_concurrent_stream_unary,
Scenario.CONCURRENT_STREAM_STREAM: _run_concurrent_stream_stream,
Scenario.CANCEL_UNARY_UNARY: _run_cancel_unary_unary,
Scenario.INFINITE_REQUEST_STREAM: _run_infinite_request_stream,
}
def run(scenario, channel):
stub = services_pb2_grpc.FirstServiceStub(channel)
try:
return _IMPLEMENTATIONS[scenario](stub)
except grpc.RpcError as rpc_error:
return Outcome(Outcome.Kind.RPC_ERROR, rpc_error.code(),
rpc_error.details())
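# A sketch of how run() is typically driven; the real channel and target come
# from the surrounding test harness, so the address below is illustrative:
#
#   with grpc.insecure_channel('localhost:50051') as channel:
#       outcome = run(Scenario.UNARY_UNARY, channel)
#       assert outcome.kind is Outcome.Kind.SATISFACTORY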
|
ex6_threads_show_ver.py
|
#!/usr/bin/env python
'''
Use threads and Netmiko to connect to each of the devices in the database. Execute
'show version' on each device. Record the amount of time required to do this.
'''
from __future__ import print_function, unicode_literals
from netmiko import ConnectHandler
from datetime import datetime
import threading
import django
django.setup()
from net_system.models import NetworkDevice # noqa
def show_version(a_device):
'''
Execute show version command using Netmiko
'''
creds = a_device.credentials
remote_conn = ConnectHandler(device_type=a_device.device_type,
ip=a_device.ip_address,
username=creds.username,
password=creds.password,
port=a_device.port, secret='')
print()
print('#' * 80)
print(remote_conn.send_command_expect("show version"))
print('#' * 80)
print()
remote_conn.disconnect()
def main():
'''
Use threads and Netmiko to connect to each of the devices in the database. Execute
'show version' on each device. Record the amount of time required to do this.
'''
start_time = datetime.now()
devices = NetworkDevice.objects.all()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
    main_thread = threading.current_thread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print(some_thread)
some_thread.join()
print("\nElapsed time: " + str(datetime.now() - start_time))
if __name__ == "__main__":
main()
|
detect_module.py
|
import tkinter as tk
from tkinter import filedialog
import os
from pathlib import Path
import platform # Linux vs Windows check
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib import style
from matplotlib.figure import Figure
import data_files.detect_processor as dtect_proc
from data_files.detect_processor import SpotDetector
from data_files.detect_processor import Spectrum
import data_files.detect_processor as detect
import data_files.safari_input as safari_input
import data_files.esa_data as esa_data
import spec_files.load_spec as load_spec
import spec_files.fit_esa as esa
from spec_files.load_spec import Spec
import traj_files.plot_traj as plot_traj
import misc.crystalview as crystalview
from misc.module import Menu
from misc.module import Module
import threading
global root_path
root_path = os.path.expanduser(".")
if platform.system() == 'Windows':
font_12 = ('Times New Roman', 12)
font_14 = ('Times New Roman', 14)
font_16 = ('Times New Roman', 16)
font_18 = ('Times New Roman', 18)
font_20 = ('Times New Roman', 20)
plt.rcParams.update({'font.family': 'Times New Roman'})
else:
font_12 = ('DejaVu Sans', 12)
font_14 = ('DejaVu Sans', 14)
font_16 = ('DejaVu Sans', 16)
font_18 = ('DejaVu Sans', 18)
font_20 = ('DejaVu Sans', 20)
plt.rcParams.update({'font.family': 'DejaVu Sans'})
plt.rcParams.update({'font.size': 18})
class Limits:
def __init__(self):
# Names of the values, for showing in the options box
self._names_ = {
't_min':"Min Theta: ",
't_max':"Max Theta: ",
'p_min':"Min Phi: ",
'p_max':"Max Phi: ",
'e_min':"Min Energy: ",
'e_max':"Max Energy: "
}
# Units to go with the value, use `` if no units
self._units_ = {
't_min':"Degrees",
't_max':"Degrees",
'p_min':"Degrees",
'p_max':"Degrees",
'e_min':"eV",
'e_max':"eV"
}
self.p_min = 0
self.p_max = 0
self.e_min = 0
self.e_max = 0
self.t_min = 0
self.t_max = 90
self.last_phi = 600
self.last_e = -1
# A help string to show in the help menu
self.help_text = ' General settings for detector limits:\n\n'+\
' Min Theta: Minimum outgoing theta-angle for particles (Degrees)\n'+\
' Max Theta: Maximum outgoing theta-angle for particles (Degrees)\n'+\
' Min Phi: Minimum outgoing phi-angle for particles (Degrees)\n'+\
' Max Phi: Maximum outgoing phi-angle for particles (Degrees)\n'+\
' Min Energy: Minimum outgoing energy for particles (eV)\n'+\
' Max Energy: Maximum outgoing energy for particles (eV)\n\n'+\
' Clicking Update will apply the changes and attempt to re-plot if applicable\n'+\
' Clicking Cancel will close the window without applying changes'
        # This is the label to click to get the above help text,
# this is also used for the label in the settings dropdown
self._label = 'Detector Limits'
# If this is set to a function, it will be called whenever
# the settings have been changed via a gui interaction
self._callback = None
class DetectSettings:
def __init__(self):
# Names of the values, for showing in the options box
self._names_ = {
'theta':'Theta: ',
'phi':"Phi: ",
'asize':"Angular Size: ",
'esize':"Energy Res: "
}
# Units to go with the value, use `` if no units
self._units_ = {
'theta':'Degrees',
'phi':"Degrees",
'asize':"Degrees",
'esize':"eV"
}
self.theta = 45
self.phi = 0
self.asize = 1
self.esize = 1
# A help string to show in the help menu
self.help_text = ' General settings for detector position and resolution:\n\n'+\
' Theta: elevation angle for the detector, measured from normal (Degrees)\n'+\
' Phi: azimuthal angle for detector (Degrees)\n'+\
' Angular Size: spatial size of detector (Degrees)\n'+\
' Energy Res: gaussian bin width for detector (eV)\n\n'+\
' Clicking Update will apply the changes and attempt to re-plot if applicable\n'+\
' Clicking Cancel will close the window without applying changes'
        # This is the label to click to get the above help text,
# this is also used for the label in the settings dropdown
self._label = 'Detector Settings'
# If this is set to a function, it will be called whenever
# the settings have been changed via a gui interaction
self._callback = None
class CompSettings:
def __init__(self):
# Names of the values, for showing in the options box
self._names_ = {
'scale_by_E':'Apply E Scaling: ',
'normalise':"Normalise Data: "
}
# Units to go with the value, use `` if no units
self._units_ = {
'scale_by_E':'',
'normalise':''
}
self.scale_by_E = True
self.normalise = True
# A help string to show in the help menu
self.help_text = ' Data Settings:\n\n'+\
' Apply E Scaling: If checked, intensity is scaled inversely with energy\n'+\
' Normalise Data: If checked, the data is normalised'
        # This is the label to click to get the above help text,
# this is also used for the label in the settings dropdown
self._label = 'Data Settings'
# If this is set to a function, it will be called whenever
# the settings have been changed via a gui interaction
self._callback = None
class TrajSettings:
def __init__(self):
# Names of the values, for showing in the options box
self._names_ = {
'show_lattice':'Show Lattice: ',
}
# Units to go with the value, use `` if no units
self._units_ = {
'show_lattice':''
}
self.show_lattice = False
# A help string to show in the help menu
self.help_text = ' Traj Settings:\n\n'+\
' Show Lattice: If checked, the lattice will be shown as well'
# This is the label to click to ge the above help text,
# this is also used for the label in the settings dropdown
self._label = 'Traj Settings'
# If this is set to a function, it will be called whenever
# the settings have been changed via a gui interaction
self._callback = None
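# Any additional settings group follows the same convention as the classes
# above: public attributes hold the values, _names_/_units_ describe how they
# are rendered, help_text and _label feed the help menu, and _callback (if
# set) is invoked after a gui edit. A minimal sketch (ExampleSettings is
# illustrative only):
#
#   class ExampleSettings:
#       def __init__(self):
#           self._names_ = {'value': 'Value: '}
#           self._units_ = {'value': 'eV'}
#           self.value = 0
#           self.help_text = ' Example settings help text'
#           self._label = 'Example Settings'
#           self._callback = None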
class DetectModule(Module):
def __init__(self, root):
Module.__init__(self, root)
# This is the gui instance that owns this module.
self.base_name = "SAFARI Detect"
self.last_run = None
self.canvas = None
self.toolbar = None
self.dataset = None
self.detector = SpotDetector(45,0,1)
self.dsettings = DetectSettings()
self.limits = Limits()
        self.comp_settings = CompSettings()
self.traj_settings = TrajSettings()
self.dsettings._callback = self.options_callback
self.limits._callback = self.options_callback
self.safio_file = None
self.traj_file = None
self.compare_esa_file = None
self.comparison_file = None
        self.fig, self.prep_fig, self.fig_name = None, None, None
self.waiting = False
self.single_shots = {}
def on_start(self):
# This is called when the module is first added, after making the settings,
# menus, etc.
self.get_tk().after(500, self.check_figs)
self.get_tk().after(500, self.check_single_shot)
def on_stop(self):
# This is called when the program is exited
print("Closing")
def get_settings(self):
# Return an array or collection of settings here
# Module can have more than 1 set of settings.
        return [self.dsettings, self.limits, self.comp_settings, self.traj_settings]
def get_menus(self):
# This returns what menus (except for settings) should be made for this module
# "File" menu
_file_menu = Menu()
_file_menu._options["select_dbug_input"] = lambda: self.select_file()
_file_menu._options["select_comp_data"] = lambda: self.select_data()
_file_menu._options["select_traj_file"] = lambda: self.select_traj_file()
_file_menu._opts_order.append("select_dbug_input")
_file_menu._opts_order.append("select_comp_data")
_file_menu._opts_order.append("select_traj_file")
_file_menu._labels["select_dbug_input"] = "Select File"
_file_menu._labels["select_comp_data"] = "Select Comparison Data"
_file_menu._labels["select_traj_file"] = "Select Traj"
dbug_input_info = ' Select a .input or .dbug file for the run.\n\n'+\
' This is used for Intensity vs. Energy plots,\n'+\
' Impact Plots, and Energy vs. Theta plots\n\n'
comp_data_info = ' Select a .dat or .txt file containing a spectrum.\n\n'+\
' This is used for Intensity vs. Energy plots,\n'+\
' Impact Plots, and Energy vs. Theta plots\n\n'
traj_file_info = ' Select a .traj file for inspecting single shot runs.'
_file_menu._helps["select_dbug_input"] = dbug_input_info
_file_menu._helps["select_comp_data"] = comp_data_info
_file_menu._helps["select_traj_file"] = traj_file_info
_file_menu._label = "File"
# "Plot" menu
_plot_menu = Menu()
# Add some options, the value is the function to run on click
_plot_menu._options["i_vs_e_plot"] = lambda: self.i_vs_e_plot()
_plot_menu._options["impact_plot"] = lambda: self.impact_plot()
_plot_menu._options["e_vs_t_plot"] = lambda: self.e_vs_t_plot(fit=False)
_plot_menu._options["e_vs_t_plot_fit"] = lambda: self.e_vs_t_plot(fit=True)
_plot_menu._options["traj_energy_plot"] = lambda: self.traj_energy_plot()
_plot_menu._options["traj_power_plot"] = lambda: self.traj_power_plot()
_plot_menu._options["traj_plot"] = lambda: self.traj_plot()
_plot_menu._options["crystal_plot"] = lambda: self.crystal_plot()
_plot_menu._opts_order.append("i_vs_e_plot")
_plot_menu._opts_order.append("impact_plot")
_plot_menu._opts_order.append("sep") # "sep" is reserved to place a separator in the dropdown
_plot_menu._opts_order.append("e_vs_t_plot")
_plot_menu._opts_order.append("e_vs_t_plot_fit")
_plot_menu._opts_order.append("sep")
_plot_menu._opts_order.append("traj_energy_plot")
_plot_menu._opts_order.append("traj_power_plot")
_plot_menu._opts_order.append("traj_plot")
_plot_menu._opts_order.append("sep")
_plot_menu._opts_order.append("crystal_plot")
# Adds some help menu text for these as well
i_vs_e_info = ' Intensity vs. Energy Plots:\n\n'+\
' Use the .data file, loads from Select File option'
impact_info = ' Impact Plots:\n\n'+\
' Use the .data file, loads from Select File option\n\n'+\
' This plot can be used to generate single shot runs.\n'+\
' Single shot runs require the Sea-Safari and XYZ executables\n'+\
' to be placed in the run directory for safari_detect, and\n'+\
' if VMD is also installed, then it will open VMD showing the\n'+\
' trajectory when the run completes'
e_vs_t_info = ' Energy vs. Theta Plots:\n\n'+\
' Use the .spec file, loads from Select File option\n\n'+\
' The "Fit" version also attempts to fit peaks and error\n'+\
' bars to the spectra represented by each column of the plot\n\n'+\
' Comparison Data files can be plotted over the fit version of this plot'
traj_energy_info = ' Trajectory Energy Plots:\n\n'+\
' Use the .traj file generated by a single shot run, loads from Select Traj option\n\n'+\
' These show energy as a function of time for the projectile in the single shot run\n\n'+\
' Single Shot runs can be generated via the Impact Plots'
traj_info = ' Trajectory Plots:\n\n'+\
' Use the .traj file generated by a single shot run, loads from Select Traj option\n\n'+\
' These show the physical trajectory for the projectile in the single shot run\n\n'+\
' Single Shot runs can be generated via the Impact Plots'
crys_info = ' Crystal Plots:\n\n'+\
' These show the locations of the lattice sites, as well as the active area\n'+\
' The green outlined box is the area of interest defined in the input files,\n'+\
' and if there is a custom surface mask, it is outlined in blue.'
_plot_menu._helps["i_vs_e_plot"] = i_vs_e_info
_plot_menu._helps["impact_plot"] = impact_info
_plot_menu._helps["e_vs_t_plot"] = e_vs_t_info
_plot_menu._helps["traj_energy_plot"] = traj_energy_info
_plot_menu._helps["traj_plot"] = traj_info
_plot_menu._helps["crystal_plot"] = crys_info
# Adds labels for the non-separator values
_plot_menu._labels["i_vs_e_plot"] = "Intensity vs. Energy Plot"
_plot_menu._labels["impact_plot"] = "Impact Plot"
_plot_menu._labels["e_vs_t_plot"] = "Energy vs. Theta Plot"
_plot_menu._labels["e_vs_t_plot_fit"] = "Energy vs. Theta Plot (Fit)"
_plot_menu._labels["traj_energy_plot"] = "Trajectory Energy Plot"
_plot_menu._labels["traj_power_plot"] = "Trajectory Power Plot"
_plot_menu._labels["traj_plot"] = "Trajectory Plot"
_plot_menu._labels["crystal_plot"] = "Crystal Plot"
# Specify a label for the menu
_plot_menu._label = "Plot"
# Returns an array of menus (only 1 in this case)
return [_file_menu, _plot_menu]
# Callback for updating the detector/dataset based on changes to dsettings and limits
def options_callback(self, window):
self.detector = SpotDetector(self.dsettings.theta,self.dsettings.phi,self.dsettings.asize)
self.detector.ss_cmd = "python3 data_files/detect_impact.py"
self.detector.safio = self.dataset.safio
self.detector.safio.ESIZE = self.dsettings.esize
self.detector.plots = False
self.detector.pics = True
self.dataset.clear()
self.dataset.safio = self.detector.safio
self.dataset.crystal = detect.loadCrystal(self.safio_file)
self.dataset.detector = self.detector
if window is not None:
window.destroy()
if self.last_run is not None:
self.last_run()
self.last_run = None
    # Selects a file for comparison to the plots.
    # The .dat and .txt files should contain an e-theta loop of data to compare against the .spec files;
    # the .esa files are for comparing against energy vs. intensity logs.
def select_data(self):
global root_path
self.comparison_file = filedialog.askopenfilename(initialdir = root_path, title = "Select file",filetypes = (("Comparison Data Fits",".dat"),("Comparison Data Fits",".txt"),("Comparison ESA Data",".esa")))
test = str(self.comparison_file)
if test == '' or test == '()':
return None
if test == '':
self.comparison_file = None
if self.comparison_file.endswith('esa'):
self.compare_esa_file = self.comparison_file
self.comparison_file = None
if self.last_run is not None:
self.last_run()
# Selects the file to load from, will only show .input and .dbug files
def select_file(self):
global root_path
newfile = filedialog.askopenfilename(initialdir = root_path, title = "Select file",filetypes = (("SAFARI input spec",".input"),("SAFARI input spec",".dbug")))
test = str(newfile)
if test == '' or test == '()':
return None
self.safio_file = newfile
root_path = os.path.dirname(newfile)
self.get_tk().title("SAFARI Detect {}".format(self.safio_file))
safio = safari_input.SafariInput(self.safio_file)
detectorParams = safio.DTECTPAR
self.detector = SpotDetector(45,safio.PHI0,1)
self.detector.ss_cmd = "python3 data_files/detect_impact.py"
if self.limits.last_phi != safio.PHI0:
self.limits.last_phi = safio.PHI0
self.limits.p_max = safio.PHI0 + .5
self.limits.p_min = safio.PHI0 - .5
if self.limits.last_e != safio.E0:
self.limits.last_e = safio.E0
self.limits.e_min = safio.EMIN
self.limits.e_max = safio.EMAX
self.dsettings.theta = self.detector.theta
self.dsettings.phi = self.detector.phi
self.dsettings.asize = 1
self.dsettings.esize = safio.ESIZE
self.dataset = Spectrum()
self.dataset.crystal = detect.loadCrystal(self.safio_file)
        self.dataset.name = self.safio_file.replace('.input', '').replace('.dbug', '')
self.dataset.safio = safio
self.detector.safio = self.dataset.safio
self.dataset.detector = self.detector
self.dataset.plots = False
self.dataset.pics = False
if self.last_run is not None:
self.last_run()
return self.dataset
# Selects the file to load from, will only show .input and .dbug files
def select_traj_file(self, open_traj=True):
global root_path
newfile = filedialog.askopenfilename(initialdir = root_path, title = "Select file",filetypes = (("SAFARI traj files",".traj"),("SAFARI traj files",".traj")))
test = str(newfile)
if test == '' or test == '()':
return None
self.traj_file = newfile
root_path = os.path.dirname(newfile)
if open_traj:
if self.last_run == self.traj_plot:
self.traj_plot()
elif self.last_run != None:
self.last_run()
else:
self.traj_energy_plot()
return self.traj_file
# Displays the matplotlib figure fig in the main window
def show_fig(self, fig):
if self.canvas is not None:
# If we already have a canvas and toolbar, remove them
# message is not even set on some Linux systems, causing
# errors when trying to call destroy() below
self.canvas.get_tk_widget().message = None
self.toolbar.message = None
self.canvas.get_tk_widget().destroy()
self.toolbar.destroy()
# create the Tkinter canvas containing the Matplotlib figure
self.canvas = FigureCanvasTkAgg(fig, master = self.get_tk())
self.canvas.draw()
# create the toolbar and place it
self.toolbar = NavigationToolbar2Tk(self.canvas, self.get_tk(), pack_toolbar=False)
self.toolbar.update()
self.toolbar.pack(side=tk.BOTTOM, fill=tk.X)
# place the canvas on the Tkinter window
self.canvas.get_tk_widget().pack(side="top",fill='both',expand=True)
# This monitors for new figures to plot, so all plotting, etc happens on main thread
def check_figs(self):
if self.fig is not None:
if self.prep_fig is not None:
self.prep_fig()
self.show_fig(self.fig)
if self.fig_name is not None:
self.fig.savefig(self.fig_name)
self.fig_name = None
self.fig = None
self.prep_fig = None
self.waiting = False
if self.waiting:
if self.canvas is not None:
# We need to cleanup the canvas and toolbar
self.canvas.get_tk_widget().message = None
self.toolbar.message = None
self.canvas.get_tk_widget().destroy()
self.toolbar.destroy()
            # Next we should probably make something that lets us know that it is waiting
self.get_tk().after(100, self.check_figs)
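    # The plotting handoff used throughout this module: a worker thread fills
    # in self.prep_fig, self.fig_name and self.fig, and check_figs() (polled
    # on the Tk main thread via after()) picks them up, draws the figure in
    # the window, and saves it when a file name was provided.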
# Initializes the dataset based on limits defined by limits
def init_data(self):
_emin = self.limits.e_min
_emax = self.limits.e_max
_phimin = self.limits.p_min
_phimax = self.limits.p_max
_thmin = self.limits.t_min
_thmax = self.limits.t_max
self.title_loading()
self.dataset.clean(emin=_emin,emax=_emax,\
phimin=_phimin,phimax=_phimax,\
thmin=_thmin,thmax=_thmax)
self.title_selected()
self.dataset.plots = False
self.dataset.pics = False
self.detector.plots = False
self.detector.pics = False
# This monitors for if a single shot run is in progress, and if so, it will give an indication that it is still running
def check_single_shot(self):
if len(self.single_shots) > 0:
to_clean = []
for vmd_file, state in self.single_shots.items():
# State 0: waiting for things to run
if state == 0:
self.title_text("Running Single Shot!")
if os.path.isfile(vmd_file):
self.single_shots[vmd_file] = 1
# State 1: vmd opened
elif state == 1:
self.title_text("Finished Single Shot!")
if not os.path.isfile(vmd_file):
self.single_shots[vmd_file] = 2
# State 2: completely finished
else:
self.title_selected()
to_clean.append(vmd_file)
for key in to_clean:
self.single_shots.pop(key)
self.get_tk().after(500, self.check_single_shot)
# This records that the run has started, so it can be watched for updating application title
def register_single_shot(self, vmd_file):
self.single_shots[vmd_file] = 0
# Produces an energy vs theta plot, this requires the .spec file to exist.
def e_vs_t_plot(self, fit=False):
# Select a file if we don't have one already.
if self.dataset is None:
ret = self.select_file()
if ret is None:
# If no file to select, just return early
return
else:
self.dataset = ret
if not fit:
self.last_run = self.e_vs_t_plot
else:
def replot():
self.e_vs_t_plot(fit=True)
self.last_run = replot
self.fig = None
self.waiting = True
fig, ax = plt.subplots(figsize=(12.0, 9.0))
# Wraps this for a separate thread, allowing off-thread processing, but still running all of the matplotlib stuff on the main thread
def do_work():
self.title_loading()
spec_file = self.safio_file.replace('.input','').replace('.dbug','')+'.spec'
spec = Spec(spec_file)
spec.peak_finder = esa.peak_finder
spec.min_e = self.limits.e_min
spec.fig, spec.ax = fig, ax
spec.big_font = False
spec.process_data(d_phi=self.limits.p_max-self.limits.p_min)
spec.make_e_t_plot(do_plot=False, do_fits=fit)
if fit:
self.title_text('Fitting, Please Wait')
e_max = spec.e_range[1]
e_min = spec.e_range[0]
t_min = spec.t_range[0]
t_max = spec.t_range[1]
axis = esa.make_axis(e_min, e_max, spec.energy, spec.img.shape[0]) * spec.energy
# Set the width for integration function
spec.e_res = self.dsettings.esize
# Sets width for integrating internally during fitting
spec.winv = 5
# Sets the gaussian integration function
spec.integrate = dtect_proc.integrate
# Attempt to fit the columns of the image
spec.try_fit(esa.fit_esa, axis, ax)
if self.comparison_file is not None:
theta, energy, err = esa.load_data(self.comparison_file)
ax.scatter(theta,energy,c='r',s=4,label="Data")
if err is not None:
ax.errorbar(theta,energy,yerr=err, c='r',fmt='none',capsize=2)
ax.legend()
# Here we update these to indicate that we have finished processing
self.prep_fig = spec.prep_fig
self.fig_name = spec_file.replace('.spec', '_fit_spec.png')
self.fig = fig
# Reset title to selected now that we are done
self.title_selected()
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Produces an intensity vs energy plot
def i_vs_e_plot(self):
# Select a file if we don't have one already.
if self.dataset is None:
ret = self.select_file()
if ret is None:
# If no file to select, just return early
return
else:
self.dataset = ret
self.last_run = self.i_vs_e_plot
self.fig = None
self.waiting = True
plots = plt.subplots(figsize=(8.0, 6.0))
# Wraps this for a separate thread, allowing off-thread processing, but still running all of the matplotlib stuff on the main thread
def do_work():
self.title_loading()
self.init_data()
self.title_text('Processing, Please Wait')
energy, intensity, scale = self.detector.spectrumE(res=self.detector.safio.ESIZE, override_fig=plots)
# Here we update these to indicate that we have finished processing
self.prep_fig = self.detector.prep_fig
if self.compare_esa_file is not None:
def new_prep():
self.detector.prep_fig()
self.add_esa_spec(plots)
self.prep_fig = new_prep
self.fig_name = self.detector.fig_name
self.fig = self.detector.fig
self.title_selected()
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Produces an impact plot
def impact_plot(self):
# Select a file if we don't have one already.
if self.dataset is None:
ret = self.select_file()
if ret is None:
# If no file to select, just return early
return
else:
self.dataset = ret
self.last_run = self.impact_plot
self.fig = None
self.waiting = True
self.detector.ss_callback = self.register_single_shot
plots = plt.subplots(figsize=(12.0, 9.0))
# Wraps this for a separate thread, allowing off-thread processing, but still running all of the matplotlib stuff on the main thread
def do_work():
self.title_loading()
self.init_data()
self.title_text('Processing, Please Wait')
self.detector.impactParam(basis=self.dataset.crystal, override_fig=plots)
fig, ax = self.detector.fig, self.detector.ax
# Here we update these to indicate that we have finished processing
self.prep_fig = self.detector.prep_fig
self.fig_name = self.detector.fig_name
self.fig = self.detector.fig
# Switch to finished title
self.title_selected()
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Produces a plot of energy as a function of time for the projectile during a single shot run
def traj_energy_plot(self):
if self.traj_file is None:
self.select_traj_file()
return
self.last_run = self.traj_energy_plot
self.fig = None
self.waiting = True
fig, ax = plt.subplots(figsize=(12.0, 9.0))
def do_work():
self.title_text('Loading Traj')
traj = plot_traj.Traj()
traj.load(self.traj_file)
traj.plot_energies(ax)
self.fig = fig
self.fig_name = self.traj_file.replace('.traj', '_traj_energy.png')
self.title_text('Trajectory Energies')
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Produces a plot of power as a function of time for the projectile during a single shot run
def traj_power_plot(self):
self.last_run = self.traj_power_plot
if self.traj_file is None:
self.select_traj_file()
return
self.fig = None
self.waiting = True
fig, ax = plt.subplots(figsize=(12.0, 9.0))
def do_work():
self.title_text('Loading Traj')
traj = plot_traj.Traj()
traj.load(self.traj_file)
traj.plot_power(ax)
self.fig = fig
self.fig_name = self.traj_file.replace('.traj', '_traj_power.png')
self.title_text('Trajectory Power')
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Produces a 3d trajectory plot for the particle
def traj_plot(self):
if self.traj_file is None:
opened = self.select_traj_file(open_traj=False)
if opened is None:
return
self.last_run = self.traj_plot
self.fig = None
self.waiting = True
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('X (Å)')
ax.set_ylabel('Y (Å)')
ax.set_zlabel('Z (Å)')
def do_work():
self.title_text('Loading Traj')
traj = plot_traj.Traj()
traj.load(self.traj_file)
if self.traj_settings.show_lattice and self.safio_file is not None:
crystalview.plot(self.safio_file, ax, do_lims=False, do_bounds=False)
traj.plot_traj_3d(fig, ax)
self.fig = fig
self.fig_name = self.traj_file.replace('.traj', '_traj.png')
self.title_text('Trajectory Plot')
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Produces a 3d plot of the crystal used for scattering, also includes indications of the overlay of the
# active area of the surface, as well as the possible surface mask
def crystal_plot(self):
# Select a file if we don't have one already.
if self.dataset is None:
ret = self.select_file()
if ret is None:
# If no file to select, just return early
return
else:
self.dataset = ret
self.last_run = self.crystal_plot
self.fig = None
self.waiting = True
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('X (Å)')
ax.set_ylabel('Y (Å)')
ax.set_zlabel('Z (Å)')
# Wraps this for a separate thread, allowing off-thread processing, but still running all of the matplotlib stuff on the main thread
def do_work():
self.title_loading()
self.title_text('Processing, Please Wait')
crystalview.plot(self.safio_file, ax)
# Here we update these to indicate that we have finished processing
self.prep_fig = None
self.fig_name = None
self.fig = fig
self.title_selected()
# Schedule this on a worker thread
thread = threading.Thread(target=do_work)
thread.start()
# Adds the loaded .esa file to the given plots, used for comparing Intensity vs. Energy plots
def add_esa_spec(self, plots):
fig, ax = plots
        E, I = esa_data.load_esa(self.compare_esa_file, scale_by_E=self.comp_settings.scale_by_E, normalise=self.comp_settings.normalise)
E = E / self.detector.safio.E0
ax.plot(E, I, label="Data")
# Sets title to showing current input file
def title_selected(self):
self.title_text('')
# Sets title to saying loading, please wait
def title_loading(self):
self.title_text('Loading, Please Wait')
# Updates the title of the window to include the given text
def title_text(self, text):
if text.strip() != '':
self.get_tk().title("{} {}; {}".format(self.base_name, self.safio_file, text))
else:
self.get_tk().title("{} {}".format(self.base_name, self.safio_file))
|
auto-build.py
|
import inotify.adapters
import os
import threading
import time
COUNT = -1
RUN = True
def run():
global COUNT
while RUN:
time.sleep(1)
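        # COUNT is -1 while idle; the watch loop below increments it on each
        # write event. Positive counts are first stepped down to 0, and a
        # build fires on a tick where COUNT is 0, after which COUNT returns
        # to -1.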
if COUNT == 0:
make()
COUNT = -1
if COUNT > 0:
COUNT = 0
t = threading.Thread(target=run)
t.start()
def make():
print "Making"
os.system("latexmk spine.tex")
WRITING_SET = frozenset(('IN_MOVED_TO', 'IN_MODIFY', 'IN_CREATE',
'IN_CLOSE_WRITE'))
def is_writing(type_names):
return bool(WRITING_SET.intersection(type_names))
i = inotify.adapters.Inotify()
i.add_watch('/home/joao/Dropbox/Faculdade/PhD/tese/tex')
while True:
try:
for event in i.event_gen():
if event is not None:
(header, type_names, watch_path, filename) = event
if filename == 'spine.tex' and is_writing(type_names):
COUNT += 1
                    print(COUNT)
except KeyboardInterrupt:
RUN = False
break
except IOError:
pass
|
test_bot.py
|
import pytest
import pytest_timeout
import zipfile
import requests
import time
import yaml
import chess
import chess.engine
import threading
import os
import sys
import stat
import shutil
import importlib
if __name__ == "__main__":
sys.exit(f"The script {os.path.basename(__file__)} should only be run by pytest.")
shutil.copyfile("lichess.py", "correct_lichess.py")
shutil.copyfile("test_bot/lichess.py", "lichess.py")
lichess_bot = importlib.import_module("lichess-bot")
platform = sys.platform
file_extension = ".exe" if platform == "win32" else ""
def download_sf():
windows_or_linux = "win" if platform == "win32" else "linux"
response = requests.get(f"https://stockfishchess.org/files/stockfish_14.1_{windows_or_linux}_x64.zip", allow_redirects=True)
with open("./TEMP/sf_zip.zip", "wb") as file:
file.write(response.content)
with zipfile.ZipFile("./TEMP/sf_zip.zip", "r") as zip_ref:
zip_ref.extractall("./TEMP/")
shutil.copyfile(f"./TEMP/stockfish_14.1_{windows_or_linux}_x64/stockfish_14.1_{windows_or_linux}_x64{file_extension}", f"./TEMP/sf{file_extension}")
shutil.copyfile(f"./TEMP/sf{file_extension}", f"./TEMP/sf2{file_extension}")
if windows_or_linux == "linux":
st = os.stat(f"./TEMP/sf{file_extension}")
os.chmod(f"./TEMP/sf{file_extension}", st.st_mode | stat.S_IEXEC)
st = os.stat(f"./TEMP/sf2{file_extension}")
os.chmod(f"./TEMP/sf2{file_extension}", st.st_mode | stat.S_IEXEC)
def download_lc0():
response = requests.get("https://github.com/LeelaChessZero/lc0/releases/download/v0.28.2/lc0-v0.28.2-windows-cpu-dnnl.zip", allow_redirects=True)
with open("./TEMP/lc0_zip.zip", "wb") as file:
file.write(response.content)
with zipfile.ZipFile("./TEMP/lc0_zip.zip", "r") as zip_ref:
zip_ref.extractall("./TEMP/")
def download_sjeng():
response = requests.get("https://sjeng.org/ftp/Sjeng112.zip", allow_redirects=True)
with open("./TEMP/sjeng_zip.zip", "wb") as file:
file.write(response.content)
with zipfile.ZipFile("./TEMP/sjeng_zip.zip", "r") as zip_ref:
zip_ref.extractall("./TEMP/")
shutil.copyfile("./TEMP/Release/Sjeng112.exe", "./TEMP/sjeng.exe")
if os.path.exists("TEMP"):
shutil.rmtree("TEMP")
os.mkdir("TEMP")
download_sf()
if platform == "win32":
download_lc0()
download_sjeng()
logging_level = lichess_bot.logging.INFO
lichess_bot.logging.basicConfig(level=logging_level, filename=None, format="%(asctime)-15s: %(message)s")
lichess_bot.enable_color_logging(debug_lvl=logging_level)
lichess_bot.logger.info("Downloaded engines")
def run_bot(CONFIG, logging_level, stockfish_path):
lichess_bot.logger.info(lichess_bot.intro())
li = lichess_bot.lichess.Lichess(CONFIG["token"], CONFIG["url"], lichess_bot.__version__)
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
lichess_bot.logger.info(f"Welcome {username}!")
if not is_bot:
is_bot = lichess_bot.upgrade_account(li)
if is_bot:
def run_test():
def thread_for_test():
open("./logs/events.txt", "w").close()
open("./logs/states.txt", "w").close()
open("./logs/result.txt", "w").close()
start_time = 10
increment = 0.1
board = chess.Board()
wtime = start_time
btime = start_time
with open("./logs/states.txt", "w") as file:
file.write(f"\n{wtime},{btime}")
engine = chess.engine.SimpleEngine.popen_uci(stockfish_path)
engine.configure({"Skill Level": 0, "Move Overhead": 1000})
while True:
if board.is_game_over():
with open("./logs/events.txt", "w") as file:
file.write("end")
break
if len(board.move_stack) % 2 == 0:
if not board.move_stack:
move = engine.play(board, chess.engine.Limit(time=1), ponder=False)
else:
start_time = time.perf_counter_ns()
move = engine.play(board, chess.engine.Limit(white_clock=wtime - 2, white_inc=increment), ponder=False)
end_time = time.perf_counter_ns()
wtime -= (end_time - start_time) / 1e9
wtime += increment
board.push(move.move)
uci_move = move.move.uci()
with open("./logs/states.txt") as states:
state = states.read().split("\n")
state[0] += f" {uci_move}"
state = "\n".join(state)
with open("./logs/states.txt", "w") as file:
file.write(state)
else: # lichess-bot move
start_time = time.perf_counter_ns()
while True:
with open("./logs/states.txt") as states:
state2 = states.read()
time.sleep(0.001)
moves = state2.split("\n")[0]
temp_board = chess.Board()
moves_are_correct = True
for move in moves.split():
try:
temp_board.push_uci(move)
except ValueError:
moves_are_correct = False
if state != state2 and moves_are_correct:
break
with open("./logs/states.txt") as states:
state2 = states.read()
end_time = time.perf_counter_ns()
if len(board.move_stack) > 1:
btime -= (end_time - start_time) / 1e9
btime += increment
move = state2.split("\n")[0].split(" ")[-1]
board.push_uci(move)
time.sleep(0.001)
with open("./logs/states.txt") as states:
state = states.read().split("\n")
state[1] = f"{wtime},{btime}"
state = "\n".join(state)
with open("./logs/states.txt", "w") as file:
file.write(state)
engine.quit()
win = board.is_checkmate() and board.turn == chess.WHITE
with open("./logs/result.txt", "w") as file:
file.write("1" if win else "0")
thr = threading.Thread(target=thread_for_test)
thr.start()
lichess_bot.start(li, user_profile, CONFIG, logging_level, None, one_game=True)
thr.join()
run_test()
with open("./logs/result.txt") as file:
data = file.read()
return data
else:
lichess_bot.logger.error(f'{user_profile["username"]} is not a bot account. Please upgrade it to a bot account!')
@pytest.mark.timeout(150, method="thread")
def test_sf():
if platform != "linux" and platform != "win32":
assert True
return
if os.path.exists("logs"):
shutil.rmtree("logs")
os.mkdir("logs")
with open("./config.yml.default") as file:
CONFIG = yaml.safe_load(file)
CONFIG["token"] = ""
CONFIG["engine"]["dir"] = "./TEMP/"
CONFIG["engine"]["name"] = f"sf{file_extension}"
CONFIG["engine"]["uci_options"]["Threads"] = 1
CONFIG["pgn_directory"] = "TEMP/sf_game_record"
stockfish_path = f"./TEMP/sf2{file_extension}"
win = run_bot(CONFIG, logging_level, stockfish_path)
shutil.rmtree("logs")
lichess_bot.logger.info("Finished Testing SF")
assert win == "1"
assert os.path.isfile(os.path.join(CONFIG["pgn_directory"], "bo vs b - zzzzzzzz.pgn"))
@pytest.mark.timeout(150, method="thread")
def test_lc0():
if platform != "win32":
assert True
return
if os.path.exists("logs"):
shutil.rmtree("logs")
os.mkdir("logs")
with open("./config.yml.default") as file:
CONFIG = yaml.safe_load(file)
CONFIG["token"] = ""
CONFIG["engine"]["dir"] = "./TEMP/"
CONFIG["engine"]["working_dir"] = "./TEMP/"
CONFIG["engine"]["name"] = "lc0.exe"
CONFIG["engine"]["uci_options"]["Threads"] = 1
CONFIG["engine"]["uci_options"].pop("Hash", None)
CONFIG["engine"]["uci_options"].pop("Move Overhead", None)
CONFIG["pgn_directory"] = "TEMP/lc0_game_record"
stockfish_path = "./TEMP/sf2.exe"
win = run_bot(CONFIG, logging_level, stockfish_path)
shutil.rmtree("logs")
lichess_bot.logger.info("Finished Testing LC0")
assert win == "1"
assert os.path.isfile(os.path.join(CONFIG["pgn_directory"], "bo vs b - zzzzzzzz.pgn"))
@pytest.mark.timeout(150, method="thread")
def test_sjeng():
if platform != "win32":
assert True
return
if os.path.exists("logs"):
shutil.rmtree("logs")
os.mkdir("logs")
with open("./config.yml.default") as file:
CONFIG = yaml.safe_load(file)
CONFIG["token"] = ""
CONFIG["engine"]["dir"] = "./TEMP/"
CONFIG["engine"]["working_dir"] = "./TEMP/"
CONFIG["engine"]["protocol"] = "xboard"
CONFIG["engine"]["name"] = "sjeng.exe"
CONFIG["engine"]["ponder"] = False
CONFIG["pgn_directory"] = "TEMP/sjeng_game_record"
stockfish_path = "./TEMP/sf2.exe"
win = run_bot(CONFIG, logging_level, stockfish_path)
shutil.rmtree("logs")
lichess_bot.logger.info("Finished Testing Sjeng")
assert win == "1"
assert os.path.isfile(os.path.join(CONFIG["pgn_directory"], "bo vs b - zzzzzzzz.pgn"))
@pytest.mark.timeout(150, method="thread")
def test_homemade():
if platform != "linux" and platform != "win32":
assert True
return
with open("strategies.py") as file:
strategies = file.read()
original_strategies = strategies
strategies = strategies.split("\n")
strategies += ["class Stockfish(ExampleEngine):", " def __init__(self, commands, options, stderr, draw_or_resign, **popen_args):", " super().__init__(commands, options, stderr, draw_or_resign, **popen_args)", f" self.engine = chess.engine.SimpleEngine.popen_uci('./TEMP/sf2{file_extension}')", " def search(self, board, time_limit, *args):", " return self.engine.play(board, time_limit)"]
with open("strategies.py", "w") as file:
file.write("\n".join(strategies))
if os.path.exists("logs"):
shutil.rmtree("logs")
os.mkdir("logs")
with open("./config.yml.default") as file:
CONFIG = yaml.safe_load(file)
CONFIG["token"] = ""
CONFIG["engine"]["name"] = "Stockfish"
CONFIG["engine"]["protocol"] = "homemade"
CONFIG["pgn_directory"] = "TEMP/homemade_game_record"
stockfish_path = f"./TEMP/sf2{file_extension}"
win = run_bot(CONFIG, logging_level, stockfish_path)
shutil.rmtree("logs")
with open("strategies.py", "w") as file:
file.write(original_strategies)
lichess_bot.logger.info("Finished Testing Homemade")
assert win == "1"
assert os.path.isfile(os.path.join(CONFIG["pgn_directory"], "bo vs b - zzzzzzzz.pgn"))
|
workers_zmq.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 9 22:28:44 2018
Based on http://mdup.fr/blog/easy-cluster-parallelization-with-zeromq
@author: vpekar
"""
import sys
import zmq
from multiprocessing import Process
import settings
from get_logger import get_logger
from utils import run_config
learner = sys.argv[1]
assert learner in settings.__dict__
LOGGER = get_logger('main', 'logs/workers_zmq_%s.log' % learner)
def slave(worker_id):
import logging
logging.getLogger("matplotlib").disabled = True
# Setup ZMQ.
context = zmq.Context()
sock = context.socket(zmq.REQ)
sock.connect(settings.ZMQ["master_address"])
while True:
LOGGER.debug("%s: Available" % worker_id)
sock.send_pyobj({"msg": "available"})
# Retrieve work and run the computation.
job = sock.recv_pyobj()
if job.get("msg") == "quit":
LOGGER.debug("%s: Received a quit msg, exiting" % worker_id)
break
LOGGER.debug("%s: Running config %s" % (worker_id, job["data"][1]))
result = run_config(job["data"])
LOGGER.debug("%s: Sending result back" % worker_id)
sock.send_pyobj({"msg": "result", "result": result})
LOGGER.debug("%s: Done sending result" % worker_id)
msg = sock.recv()
if msg == b"quit":
LOGGER.debug("%s Received msg %s" % (worker_id, msg))
break
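# Protocol expected from the master (inferred from the loop above): the worker
# announces {"msg": "available"}, then receives either {"msg": "quit"} or a
# job whose "data" field is handed to run_config(); the result goes back as
# {"msg": "result", "result": ...} and the master's next reply (b"quit" or
# anything else) decides whether the worker exits or asks for more work.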
if __name__ == "__main__":
# Create a pool of workers to distribute work to
for _id in range(settings.PREPROCESSING['n_jobs']):
Process(target=slave, args=(_id,)).start()
|
gcsio.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
# pytype: skip-file
import errno
import io
import logging
import multiprocessing
import re
import threading
import time
import traceback
from itertools import islice
from apache_beam.internal.http_client import get_new_http
from apache_beam.internal.metrics.metric import ServiceCallMetric
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import PipeStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.io.gcp import resource_identifiers
from apache_beam.metrics import monitoring_infos
from apache_beam.utils import retry
__all__ = ['GcsIO']
_LOGGER = logging.getLogger(__name__)
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
# Batch endpoint URL for GCS.
# We have to specify an API specific endpoint here since Google APIs global
# batch endpoints will be deprecated on 03/25/2019.
# See https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html. # pylint: disable=line-too-long
# Currently apitools library uses a global batch endpoint by default:
# https://github.com/google/apitools/blob/master/apitools/base/py/batch.py#L152
# TODO: remove this constant and its usage after apitools moves to using an API
# specific batch endpoint or after the Beam gcsio module starts using a GCS
# client library that does not use global batch endpoints.
GCS_BATCH_ENDPOINT = 'https://www.googleapis.com/batch/storage/v1'
def parse_gcs_path(gcs_path, object_optional=False):
"""Return the bucket and object names of the given gs:// path."""
match = re.match('^gs://([^/]+)/(.*)$', gcs_path)
if match is None or (match.group(2) == '' and not object_optional):
raise ValueError(
'GCS path must be in the form gs://<bucket>/<object>. '
f'Encountered {gcs_path!r}')
return match.group(1), match.group(2)
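# For example (the path below is illustrative), parse_gcs_path('gs://my-bucket/a/b.txt')
# returns ('my-bucket', 'a/b.txt'); an empty object name is only accepted when
# object_optional=True.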
def default_gcs_bucket_name(project, region):
from hashlib import md5
return 'dataflow-staging-%s-%s' % (
region, md5(project.encode('utf8')).hexdigest())
def get_or_create_default_gcs_bucket(options):
"""Create a default GCS bucket for this project."""
if getattr(options, 'dataflow_kms_key', None):
_LOGGER.warning(
'Cannot create a default bucket when --dataflow_kms_key is set.')
return None
project = getattr(options, 'project', None)
region = getattr(options, 'region', None)
if not project or not region:
return None
bucket_name = default_gcs_bucket_name(project, region)
bucket = GcsIO().get_bucket(bucket_name)
if bucket:
return bucket
else:
_LOGGER.warning(
'Creating default GCS bucket for project %s: gs://%s',
project,
bucket_name)
return GcsIO().create_bucket(bucket_name, project, location=region)
class GcsIOError(IOError, retry.PermanentException):
"""GCS IO error that should not be retried."""
pass
class GcsIO(object):
"""Google Cloud Storage I/O client."""
def __init__(self, storage_client=None):
if storage_client is None:
storage_client = storage.StorageV1(
credentials=auth.get_service_credentials(),
get_credentials=False,
http=get_new_http(),
response_encoding='utf8')
self.client = storage_client
self._rewrite_cb = None
self.bucket_to_project_number = {}
def get_project_number(self, bucket):
if bucket not in self.bucket_to_project_number:
bucket_metadata = self.get_bucket(bucket_name=bucket)
self.bucket_to_project_number[bucket] = bucket_metadata.projectNumber
return self.bucket_to_project_number[bucket]
def _set_rewrite_response_callback(self, callback):
"""For testing purposes only. No backward compatibility guarantees.
Args:
callback: A function that receives ``storage.RewriteResponse``.
"""
self._rewrite_cb = callback
def get_bucket(self, bucket_name):
"""Returns an object bucket from its name, or None if it does not exist."""
try:
request = storage.StorageBucketsGetRequest(bucket=bucket_name)
return self.client.buckets.Get(request)
except HttpError:
return None
def create_bucket(self, bucket_name, project, kms_key=None, location=None):
"""Create and return a GCS bucket in a specific project."""
encryption = None
if kms_key:
encryption = storage.Bucket.EncryptionValue(kms_key)
request = storage.StorageBucketsInsertRequest(
bucket=storage.Bucket(
name=bucket_name, location=location, encryption=encryption),
project=project,
)
try:
return self.client.buckets.Insert(request)
except HttpError:
return None
def open(
self,
filename,
mode='r',
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
mime_type='application/octet-stream'):
"""Open a GCS file path for reading or writing.
Args:
filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
mode (str): ``'r'`` for reading or ``'w'`` for writing.
read_buffer_size (int): Buffer size to use during read operations.
mime_type (str): Mime type to set for write operations.
Returns:
GCS file object.
Raises:
ValueError: Invalid open file mode.
"""
if mode == 'r' or mode == 'rb':
downloader = GcsDownloader(
self.client,
filename,
buffer_size=read_buffer_size,
get_project_number=self.get_project_number)
return io.BufferedReader(
DownloaderStream(
downloader, read_buffer_size=read_buffer_size, mode=mode),
buffer_size=read_buffer_size)
elif mode == 'w' or mode == 'wb':
uploader = GcsUploader(
self.client,
filename,
mime_type,
get_project_number=self.get_project_number)
return io.BufferedWriter(
UploaderStream(uploader, mode=mode), buffer_size=128 * 1024)
else:
raise ValueError('Invalid file open mode: %s.' % mode)
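  # Typical usage of open() (sketch; the bucket and object names below are
  # illustrative):
  #
  #   with GcsIO().open('gs://my-bucket/my-object', 'rb') as f:
  #       data = f.read()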
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def delete(self, path):
"""Deletes the object at the given GCS path.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
try:
self.client.objects.Delete(request)
except HttpError as http_error:
if http_error.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def delete_batch(self, paths):
"""Deletes the objects at the given GCS paths.
Args:
paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
not to exceed MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (path, exception) in the same order as the paths
argument, where exception is None if the operation succeeded or
the relevant exception if the operation failed.
"""
if not paths:
return []
paths = iter(paths)
result_statuses = []
while True:
paths_chunk = list(islice(paths, MAX_BATCH_OPERATION_SIZE))
if not paths_chunk:
return result_statuses
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for path in paths_chunk:
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
batch_request.Add(self.client.objects, 'Delete', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
for i, api_call in enumerate(api_calls):
path = paths_chunk[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Return success when the file doesn't exist anymore for idempotency.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = None
result_statuses.append((path, exception))
return result_statuses
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def copy(
self,
src,
dest,
dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite API call will return after these many bytes.
Used for testing.
Raises:
TimeoutError: on timeout.
"""
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
response = self.client.objects.Rewrite(request)
while not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten,
response.objectSize,
src,
dest)
request.rewriteToken = response.rewriteToken
response = self.client.objects.Rewrite(request)
if self._rewrite_cb is not None:
self._rewrite_cb(response)
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def copy_batch(
self,
src_dest_pairs,
dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
paths to copy from src to dest, not to exceed
MAX_BATCH_OPERATION_SIZE in length.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite call will return after these many bytes. Used
primarily for testing.
Returns: List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not src_dest_pairs:
return []
pair_to_request = {}
for pair in src_dest_pairs:
src_bucket, src_path = parse_gcs_path(pair[0])
dest_bucket, dest_path = parse_gcs_path(pair[1])
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
pair_to_request[pair] = request
pair_to_status = {}
while True:
pairs_in_batch = list(set(src_dest_pairs) - set(pair_to_status))
if not pairs_in_batch:
break
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for pair in pairs_in_batch:
batch_request.Add(self.client.objects, 'Rewrite', pair_to_request[pair])
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
for pair, api_call in zip(pairs_in_batch, api_calls):
src, dest = pair
response = api_call.response
if self._rewrite_cb is not None:
self._rewrite_cb(response)
if api_call.is_error:
exception = api_call.exception
# Translate 404 to the appropriate not found exception.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = (
GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
pair_to_status[pair] = exception
elif not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten,
response.objectSize,
src,
dest)
pair_to_request[pair].rewriteToken = response.rewriteToken
else:
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
pair_to_status[pair] = None
return [(pair[0], pair[1], pair_to_status[pair]) for pair in src_dest_pairs]
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def copytree(self, src, dest):
"""Renames the given GCS "directory" recursively from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>/.
dest: GCS file path pattern in the form gs://<bucket>/<name>/.
"""
assert src.endswith('/')
assert dest.endswith('/')
for entry in self.list_prefix(src):
rel_path = entry[len(src):]
self.copy(entry, dest + rel_path)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
"""Renames the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
self.copy(src, dest)
self.delete(src)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def exists(self, path):
"""Returns whether the given GCS object exists.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
try:
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
self.client.objects.Get(request) # metadata
return True
except HttpError as http_error:
if http_error.status_code == 404:
# HTTP 404 indicates that the file did not exist
return False
else:
# We re-raise all other exceptions
raise
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def checksum(self, path):
"""Looks up the checksum of a GCS object.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).crc32c
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size(self, path):
"""Returns the size of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: size of the GCS object in bytes.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def kms_key(self, path):
"""Returns the KMS key of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: KMS key name of the GCS object as a string, or None if it doesn't
have one.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).kmsKeyName
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def last_updated(self, path):
"""Returns the last updated epoch time of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
    Returns: last updated time of the GCS object in seconds.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
datetime = self.client.objects.Get(request).updated
return (
time.mktime(datetime.timetuple()) - time.timezone +
datetime.microsecond / 1000000.0)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def list_prefix(self, path):
"""Lists files matching the prefix.
Args:
path: GCS file path pattern in the form gs://<bucket>/[name].
Returns:
Dictionary of file name -> size.
"""
bucket, prefix = parse_gcs_path(path, object_optional=True)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
file_sizes = {}
counter = 0
start_time = time.time()
_LOGGER.info("Starting the size estimation of the input")
while True:
response = self.client.objects.List(request)
for item in response.items:
file_name = 'gs://%s/%s' % (item.bucket, item.name)
file_sizes[file_name] = item.size
counter += 1
if counter % 10000 == 0:
_LOGGER.info("Finished computing size of: %s files", len(file_sizes))
if response.nextPageToken:
request.pageToken = response.nextPageToken
else:
break
_LOGGER.info(
"Finished listing %s files in %s seconds.",
counter,
time.time() - start_time)
return file_sizes
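  # Illustrative sketch of the list_prefix return shape (comment only); `gcs` is a
  # hypothetical instance of this class:
  #   sizes = gcs.list_prefix('gs://my-bucket/logs/')
  #   # e.g. {'gs://my-bucket/logs/2020-01-01.log': 1024, ...}
  #   total_bytes = sum(sizes.values())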
class GcsDownloader(Downloader):
def __init__(self, client, path, buffer_size, get_project_number):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._buffer_size = buffer_size
self._get_project_number = get_project_number
project_number = self._get_project_number(self._bucket)
# Create a request count metric
resource = resource_identifiers.GoogleCloudStorageBucket(self._bucket)
labels = {
monitoring_infos.SERVICE_LABEL: 'Storage',
monitoring_infos.METHOD_LABEL: 'Objects.get',
monitoring_infos.RESOURCE_LABEL: resource,
monitoring_infos.GCS_BUCKET_LABEL: self._bucket,
monitoring_infos.GCS_PROJECT_ID_LABEL: str(project_number)
}
service_call_metric = ServiceCallMetric(
request_count_urn=monitoring_infos.API_REQUEST_COUNT_URN,
base_labels=labels)
# Get object state.
self._get_request = (
storage.StorageObjectsGetRequest(
bucket=self._bucket, object=self._name))
try:
metadata = self._get_object_metadata(self._get_request)
service_call_metric.call('ok')
except HttpError as http_error:
service_call_metric.call(http_error)
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
else:
_LOGGER.error(
'HTTP error while requesting file %s: %s', self._path, http_error)
raise
self._size = metadata.size
# Ensure read is from file of the correct generation.
self._get_request.generation = metadata.generation
# Initialize read buffer state.
self._download_stream = io.BytesIO()
self._downloader = transfer.Download(
self._download_stream,
auto_transfer=False,
chunksize=self._buffer_size,
num_retries=20)
try:
self._client.objects.Get(self._get_request, download=self._downloader)
service_call_metric.call('ok')
except HttpError as e:
service_call_metric.call(e)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self._client.objects.Get(get_request)
@property
def size(self):
return self._size
def get_range(self, start, end):
self._download_stream.seek(0)
self._download_stream.truncate(0)
self._downloader.GetRange(start, end - 1)
return self._download_stream.getvalue()
class GcsUploader(Uploader):
def __init__(self, client, path, mime_type, get_project_number):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._mime_type = mime_type
self._get_project_number = get_project_number
# Set up communication with child thread.
parent_conn, child_conn = multiprocessing.Pipe()
self._child_conn = child_conn
self._conn = parent_conn
# Set up uploader.
self._insert_request = (
storage.StorageObjectsInsertRequest(
bucket=self._bucket, name=self._name))
self._upload = transfer.Upload(
PipeStream(self._child_conn),
self._mime_type,
chunksize=WRITE_CHUNK_SIZE)
self._upload.strategy = transfer.RESUMABLE_UPLOAD
# Start uploading thread.
self._upload_thread = threading.Thread(target=self._start_upload)
self._upload_thread.daemon = True
self._upload_thread.last_error = None
self._upload_thread.start()
# TODO(silviuc): Refactor so that retry logic can be applied.
# There is retry logic in the underlying transfer library but we should make
# it more explicit so we can control the retry parameters.
@retry.no_retries # Using no_retries marks this as an integration point.
def _start_upload(self):
# This starts the uploader thread. We are forced to run the uploader in
# another thread because the apitools uploader insists on taking a stream
# as input. Happily, this also means we get asynchronous I/O to GCS.
#
# The uploader by default transfers data in chunks of 1024 * 1024 bytes at
# a time, buffering writes until that size is reached.
project_number = self._get_project_number(self._bucket)
# Create a request count metric
resource = resource_identifiers.GoogleCloudStorageBucket(self._bucket)
labels = {
monitoring_infos.SERVICE_LABEL: 'Storage',
monitoring_infos.METHOD_LABEL: 'Objects.insert',
monitoring_infos.RESOURCE_LABEL: resource,
monitoring_infos.GCS_BUCKET_LABEL: self._bucket,
monitoring_infos.GCS_PROJECT_ID_LABEL: str(project_number)
}
service_call_metric = ServiceCallMetric(
request_count_urn=monitoring_infos.API_REQUEST_COUNT_URN,
base_labels=labels)
try:
self._client.objects.Insert(self._insert_request, upload=self._upload)
service_call_metric.call('ok')
except Exception as e: # pylint: disable=broad-except
service_call_metric.call(e)
_LOGGER.error(
'Error in _start_upload while inserting file %s: %s',
self._path,
traceback.format_exc())
self._upload_thread.last_error = e
finally:
self._child_conn.close()
def put(self, data):
try:
self._conn.send_bytes(data.tobytes())
except EOFError:
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
raise
def finish(self):
self._conn.close()
# TODO(udim): Add timeout=DEFAULT_HTTP_TIMEOUT_SECONDS * 2 and raise if
# isAlive is True.
self._upload_thread.join()
# Check for exception since the last put() call.
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
from random import sample
from struct import pack
import uuid
import subprocess
from collections import OrderedDict
import Common.LongFilePathOs as os
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from Common.DataType import *
from Common.BuildToolError import *
from CommonDataClass.DataClass import *
from Common.Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile(r"\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile(r"^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile(r"^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile(r'^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile(r'^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile(r'^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolution path
@param efifilepath: EFI binary file full path
@param varnames iteratable container whose elements are variable names to be searched
@return List whos elements are tuple with variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcode(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
        # state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
        elif status == 2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = valuePatternGcc.match(line)
if m is not None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m is not None:
m = re.match(".data.(%s)$" % varname, line)
if m is not None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = pcdPatternGcc.match(Str.strip())
if m is not None:
varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if startPatternGeneral.match(line):
status = 1
continue
if addressPatternGeneral.match(line):
status = 2
continue
if line.startswith("entry point at"):
status = 3
continue
if status == 1 and len(line) != 0:
m = secReGeneral.match(line)
assert m is not None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
# cannot pregenerate this RegEx since it uses varname from varnames.
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 is not None:
                    # found a binary PCD entry in the map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
if not os.path.exists(DbDir):
os.makedirs(DbDir)
#
# A temporary INF is copied to database path which must have write permission
# The temporary will be removed at the end of build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
if f1.read() == f2.read():
return RtPath
_TempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporary created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
while _TempInfs:
File = _TempInfs.pop()
if os.path.exists(File):
os.remove(File)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
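# Illustrative example (comment only), traced through the conversion above:
#   GuidStringToGuidStructureString('12345678-1234-1234-1234-123456789ABC')
#   -> '{0x12345678, 0x1234, 0x1234, {0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC}}'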
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
if not GlobalData.gGuidCFormatPattern.match(GuidValue):
return ''
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if not IsBinaryFile:
Content = Content.replace("\n", os.linesep)
if os.path.exists(File):
try:
if Content == open(File, "rb").read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
try:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
except IOError as X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
return True
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = P.Guids.keys()
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = [x for x in P.Guids if x not in P._PrivateGuids]
if CName in GuidKeys:
return P.Guids[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = []
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return "".join(self.String)
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String.append( "".join(S.Instantiate(Dictionary) for S in SectionList))
else:
if isinstance(AppendString,list):
self.String.extend(AppendString)
else:
self.String.append(AppendString)
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of a thread to print progress on the console.
#
class Progressor:
    # for avoiding an endless loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
    # @param OpenMessage The string printed before progress characters
    # @param CloseMessage The string printed after progress characters
    # @param ProgressChar The character used to indicate the progress
    # @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag is None:
Progressor._StopFlag = threading.Event()
    ## Start to print progress characters
    #
    # @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage is not None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread is None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
    ## Stop printing progress characters
    #
    # @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage is not None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag is not None:
Progressor._StopFlag.set()
if Progressor._ProgressThread is not None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
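# Illustrative Progressor usage (comment only):
#   progress = Progressor("Processing", "done")
#   progress.Start()     # prints "Processing " and then a '.' every Interval seconds
#   ... long-running work ...
#   progress.Stop()      # prints " done" and stops the background thread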
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict whose keys and values can be
# accessed in the order they were added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of the keys.
#
class sdict(dict):
## Constructor
def __init__(self):
dict.__init__(self)
self._key_list = []
## [] operator
def __setitem__(self, key, value):
if key not in self._key_list:
self._key_list.append(key)
dict.__setitem__(self, key, value)
## del operator
def __delitem__(self, key):
self._key_list.remove(key)
dict.__delitem__(self, key)
## used in "for k in dict" loop to ensure the correct order
def __iter__(self):
return self.iterkeys()
## len() support
def __len__(self):
return len(self._key_list)
## "in" test support
def __contains__(self, key):
return key in self._key_list
## indexof support
def index(self, key):
return self._key_list.index(key)
## insert support
def insert(self, key, newkey, newvalue, order):
index = self._key_list.index(key)
if order == 'BEFORE':
self._key_list.insert(index, newkey)
dict.__setitem__(self, newkey, newvalue)
elif order == 'AFTER':
self._key_list.insert(index + 1, newkey)
dict.__setitem__(self, newkey, newvalue)
## append support
def append(self, sdict):
for key in sdict:
if key not in self._key_list:
self._key_list.append(key)
dict.__setitem__(self, key, sdict[key])
def has_key(self, key):
return key in self._key_list
## Empty the dict
def clear(self):
self._key_list = []
dict.clear(self)
## Return a copy of keys
def keys(self):
keys = []
for key in self._key_list:
keys.append(key)
return keys
## Return a copy of values
def values(self):
values = []
for key in self._key_list:
values.append(self[key])
return values
## Return a copy of (key, value) list
def items(self):
items = []
for key in self._key_list:
items.append((key, self[key]))
return items
## Iteration support
def iteritems(self):
return iter(self.items())
    ## Keys iteration support
def iterkeys(self):
return iter(self.keys())
    ## Values iteration support
def itervalues(self):
return iter(self.values())
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
value = None
if key in self._key_list:
value = self[key]
self.__delitem__(key)
        elif len(dv) != 0:
            value = dv[0]
return value
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
key = self._key_list[-1]
value = self[key]
self.__delitem__(key)
return key, value
def update(self, dict=None, **kwargs):
if dict is not None:
for k, v in dict.items():
self[k] = v
if len(kwargs):
for k, v in kwargs.items():
self[k] = v
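# Illustrative sdict usage (comment only): insertion order is preserved by the
# internal key list, unlike a plain pre-3.7 dict:
#   d = sdict()
#   d['zebra'] = 1
#   d['apple'] = 2
#   d.keys()   ->  ['zebra', 'apple']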
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', TAB_STAR, 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
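# Illustrative tdict usage (comment only), showing the wildcard fallback described above:
#   t = tdict(True, 2)        # single-value mode, two key levels
#   t['IA32', 'PcdFoo'] = '1'
#   t['IA32', 'PcdFoo']       ->  '1'   (exact match)
#   t['COMMON', 'PcdFoo']     ->  '1'   (wildcard key falls back to any first-level key)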
def AnalyzePcdExpression(Setting):
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
Setting = Setting.replace('\\\\', RanStr).strip()
# There might be escaped quote in a string: \", \\\" , \', \\\'
Data = Setting
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InSingleQuoteStr = False
InDoubleQuoteStr = False
Pair = 0
for Index, ch in enumerate(Data):
if ch == '"' and not InSingleQuoteStr:
if Data[Index - 1] != '\\':
InDoubleQuoteStr = not InDoubleQuoteStr
elif ch == "'" and not InDoubleQuoteStr:
if Data[Index - 1] != '\\':
InSingleQuoteStr = not InSingleQuoteStr
elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair += 1
elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair -= 1
if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
for i, ch in enumerate(FieldList):
if RanStr in ch:
FieldList[i] = ch.replace(RanStr,'\\\\')
return FieldList
def ParseFieldValue (Value):
def ParseDevPathValue (Value):
if '\\' in Value:
            Value = Value.replace('\\', '/').replace(' ', '')
Cmd = 'DevicePath ' + '"' + Value + '"'
try:
p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
except Exception as X:
raise BadExpression("DevicePath: %s" % (str(X)) )
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
if err:
raise BadExpression("DevicePath: %s" % str(err))
Size = len(out.split())
out = ','.join(out.split())
return '{' + out + '}', Size
if "{CODE(" in Value:
return Value, len(Value.split(","))
if isinstance(Value, type(0)):
        return Value, (Value.bit_length() + 7) // 8
if not isinstance(Value, type('')):
raise BadExpression('Type %s is %s' %(Value, type(Value)))
Value = Value.strip()
if Value.startswith(TAB_UINT8) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 1:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 1
if Value.startswith(TAB_UINT16) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 2:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 2
if Value.startswith(TAB_UINT32) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 4:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 4
if Value.startswith(TAB_UINT64) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 8:
raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
return Value, 8
if Value.startswith(TAB_GUID) and Value.endswith(')'):
Value = Value.split('(', 1)[1][:-1].strip()
if Value[0] == '{' and Value[-1] == '}':
TmpValue = GuidStructureStringToGuidString(Value)
if not TmpValue:
raise BadExpression("Invalid GUID value string %s" % Value)
Value = TmpValue
if Value[0] == '"' and Value[-1] == '"':
Value = Value[1:-1]
try:
Value = "'" + uuid.UUID(Value).get_bytes_le() + "'"
except ValueError as Message:
raise BadExpression(Message)
Value, Size = ParseFieldValue(Value)
return Value, 16
if Value.startswith('L"') and Value.endswith('"'):
# Unicode String
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, (len(List) + 1) * 2
if Value.startswith('"') and Value.endswith('"'):
# ASCII String
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List) + 1
if Value.startswith("L'") and Value.endswith("'"):
# Unicode Character Constant
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, len(List) * 2
if Value.startswith("'") and Value.endswith("'"):
# Character constant
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List)
if Value.startswith('{') and Value.endswith('}'):
# Byte array
Value = Value[1:-1]
List = [Item.strip() for Item in Value.split(',')]
List.reverse()
Value = 0
RetSize = 0
for Item in List:
ItemValue, Size = ParseFieldValue(Item)
RetSize += Size
for I in range(Size):
Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
return Value, RetSize
if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
Value = Value.strip().strip('"')
return ParseDevPathValue(Value)
if Value.lower().startswith('0x'):
try:
Value = int(Value, 16)
except:
raise BadExpression("invalid hex value: %s" % Value)
if Value == 0:
return 0, 1
        return Value, (Value.bit_length() + 7) // 8
if Value[0].isdigit():
Value = int(Value, 10)
if Value == 0:
return 0, 1
        return Value, (Value.bit_length() + 7) // 8
if Value.lower() == 'true':
return 1, 1
if Value.lower() == 'false':
return 0, 1
return Value, 1
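# Illustrative ParseFieldValue results (comment only), as (value, size-in-bytes) tuples:
#   ParseFieldValue('TRUE')   ->  (1, 1)
#   ParseFieldValue('0x10')   ->  (16, 1)
#   ParseFieldValue('0')      ->  (0, 1)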
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# The PCD value needs to be extracted from such a string; the value might be an expression
# containing the "|" operator, which can also appear inside string values.
#
# @param Setting:  String containing the information described above, with "TokenSpace.PcdCName|" stripped
# @param PcdType:  PCD type: feature, fixed, dynamic default, VPD, HII
# @param DataType: The datum type of the PCD: VOID*, UINT, BOOL
# @retval:
#   ValueList: A list containing the fields described above
#   IsValid:   True if the setting conforms to the EBNF above, otherwise False
#   Index:     The index of PcdValue in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1 and FieldList[1]:
DataType = FieldList[1]
if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
IsValid = False
if len(FieldList) > 2:
Size = FieldList[2]
if IsValid:
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [str(Value), DataType, str(Size)], IsValid, 0
elif PcdType == MODEL_PCD_FEATURE_FLAG:
Value = FieldList[0]
Size = ''
IsValid = (len(FieldList) <= 1)
return [Value, DataType, str(Size)], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == TAB_VOID:
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [VpdOffset, str(Size), Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
IsValid = (3 <= len(FieldList) <= 5)
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
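# Illustrative AnalyzeDscPcd results (comment only), returning (ValueList, IsValid, Index):
#   AnalyzeDscPcd('TRUE', MODEL_PCD_FEATURE_FLAG)
#     ->  (['TRUE', '', ''], True, 0)
#   AnalyzeDscPcd('0x1|UINT8', MODEL_PCD_FIXED_AT_BUILD)
#     ->  (['0x1', 'UINT8', ''], True, 0)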
## AnalyzePcdData
#
# Analyze the PCD Value, Datum type and TokenNumber.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting:  A string containing value/datum type/token number information;
#
# @retval   ValueList: A list containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == TAB_VOID:
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
                or (Value.startswith('{') and Value.endswith('}')) or ((Value.startswith("L'") or Value.startswith("'")) and Value.endswith("'"))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = sorted(Printset)
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
if Value and int(Value, 0) < 0:
return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
try:
            Value = int(Value, 0)
if Value > MAX_VAL_TYPE[Type]:
return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return True, "StructurePcd"
return True, ""
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
    for Index in range(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
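# Illustrative CommonPath result (comment only), assuming a POSIX path separator:
#   CommonPath(['/ws/PkgA/Module.inf', '/ws/PkgB/Lib.inf'])  ->  '/ws'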
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
return self.Path == str(Other)
## Override __cmp__ function
#
    # Customize the comparison operation of two PathClass objects
#
# @retval 0 The two PathClass are different
# @retval -1 The first PathClass is less than the second PathClass
# @retval 1 The first PathClass is Bigger than the second PathClass
def __cmp__(self, Other):
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
@cached_property
def Key(self):
return self.Path.upper()
@property
def TimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
# @param File FilePath of PeImage
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != 'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class DefaultStore():
def __init__(self, DefaultStores ):
self.DefaultStores = DefaultStores
def DefaultStoreID(self, DefaultStoreName):
for key, value in self.DefaultStores.items():
if value == DefaultStoreName:
return key
return None
def GetDefaultDefault(self):
if not self.DefaultStores or "0" in self.DefaultStores:
return "0", TAB_DEFAULT_STORES_DEFAULT
else:
minvalue = min(int(value_str) for value_str in self.DefaultStores)
return (str(minvalue), self.DefaultStores[str(minvalue)])
def GetMin(self, DefaultSIdList):
if not DefaultSIdList:
return TAB_DEFAULT_STORES_DEFAULT
storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
if not storeidset:
return ""
minid = min(storeidset )
for sid, name in self.DefaultStores.values():
if sid == minid:
return name
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds=None):
if SkuIds is None:
SkuIds = {}
for SkuName in SkuIds:
SkuId = SkuIds[SkuName][0]
skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
if skuid_num > 0xFFFFFFFFFFFFFFFF:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
% (SkuName, SkuId))
self.AvailableSkuIds = sdict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
self.SkuData = SkuIds
self._SkuInherit = {}
self._SkuIdentifier = SkuIdentifier
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = SkuIds.keys()
self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each][0]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
if self.SkuUsageType != SkuClass.SINGLE:
self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
if self.SkuIdSet:
GlobalData.gSkuids = (self.SkuIdSet)
if 'COMMON' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('COMMON')
if self.SkuUsageType == self.SINGLE:
if len(GlobalData.gSkuids) != 1:
if 'DEFAULT' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('DEFAULT')
if GlobalData.gSkuids:
GlobalData.gSkuids.sort()
def GetNextSkuId(self, skuname):
if not self._SkuInherit:
self._SkuInherit = {}
for item in self.SkuData.values():
self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
return self._SkuInherit.get(skuname, "DEFAULT")
def GetSkuChain(self, sku):
if sku == "DEFAULT":
return ["DEFAULT"]
skulist = [sku]
nextsku = sku
while True:
nextsku = self.GetNextSkuId(nextsku)
skulist.append(nextsku)
if nextsku == "DEFAULT":
break
skulist.reverse()
return skulist
def SkuOverrideOrder(self):
skuorderset = []
for skuname in self.SkuIdSet:
skuorderset.append(self.GetSkuChain(skuname))
skuorder = []
for index in range(max(len(item) for item in skuorderset)):
for subset in skuorderset:
if index > len(subset)-1:
continue
if subset[index] in skuorder:
continue
skuorder.append(subset[index])
return skuorder
@property
def SkuUsageType(self):
if self._SkuIdentifier.upper() == "ALL":
return SkuClass.MULTIPLE
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
return SkuClass.SINGLE
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
return SkuClass.SINGLE
return SkuClass.MULTIPLE
def DumpSkuIdArrary(self):
if self.SkuUsageType == SkuClass.SINGLE:
return "{0x0}"
ArrayStrList = []
for skuname in self.AvailableSkuIds:
if skuname == "COMMON":
continue
while skuname != "DEFAULT":
ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
skuname = self.GetNextSkuId(skuname)
ArrayStrList.append("0x0")
return "{{{myList}}}".format(myList=",".join(ArrayStrList))
@property
def AvailableSkuIdSet(self):
return self.AvailableSkuIds
@property
def SystemSkuId(self):
if self.SkuUsageType == SkuClass.SINGLE:
if len(self.SkuIdSet) == 1:
return self.SkuIdSet[0]
else:
return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
else:
return 'DEFAULT'
## Get the integer value from string like "14U" or integer like 2
#
# @param Input The object that may be either an integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if type(Input) in (int, long):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
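# Editor's illustrative sketch (not part of the original BaseTools module): GetIntegerValue
# accepts plain integers as well as C-style suffixed or hexadecimal strings.
def _example_get_integer_value():
    assert GetIntegerValue(2) == 2
    assert GetIntegerValue("14U") == 14
    assert GetIntegerValue("0x10") == 16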
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
return pack(PACK_PATTERN_GUID,
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
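# Editor's illustrative sketch (not part of the original BaseTools module), assuming
# PACK_PATTERN_GUID describes the usual 16-byte EFI_GUID layout; the GUID below is arbitrary.
def _example_pack_guid():
    GuidParts = 'AF9FFD67-EC10-488A-9DFC-6CBF5EE22C2E'.split('-')
    assert len(PackGUID(GuidParts)) == 16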
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
return pack(PACK_PATTERN_GUID,
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
## Deep copy a dict/OrderedDict recursively
#
# @param ori_dict a nested dict or OrderedDict
#
# @retval a new dict or OrderedDict
#
def CopyDict(ori_dict):
dict_type = ori_dict.__class__
if dict_type not in (dict,OrderedDict):
return ori_dict
new_dict = dict_type()
for key in ori_dict:
if isinstance(ori_dict[key],(dict,OrderedDict)):
new_dict[key] = CopyDict(ori_dict[key])
else:
new_dict[key] = ori_dict[key]
return new_dict
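# Editor's illustrative sketch (not part of the original BaseTools module): CopyDict
# returns an independent copy, so mutating the copy leaves the original untouched.
def _example_copy_dict():
    Original = OrderedDict([('Section', {'Key': 1})])
    Duplicate = CopyDict(Original)
    Duplicate['Section']['Key'] = 2
    assert Original['Section']['Key'] == 1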
#
# Remove the c/c++ comments: // and /* */
#
def RemoveCComments(ctext):
    return re.sub(r'//.*?\n|/\*.*?\*/', '\n', ctext, flags=re.S)
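# Editor's illustrative sketch (not part of the original BaseTools module): both line and
# block comments are stripped while the surrounding code is preserved.
def _example_remove_c_comments():
    Text = 'int a; // line comment\nint b; /* block\ncomment */ int c;'
    Cleaned = RemoveCComments(Text)
    assert '//' not in Cleaned and '/*' not in Cleaned
    assert 'int a;' in Cleaned and 'int c;' in Cleaned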
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
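# Editor's illustrative sketch (not part of the original miner): bytereverse swaps the
# byte order of a 32-bit word, e.g. 0x12345678 becomes 0x78563412.
def _example_bytereverse():
    assert bytereverse(0x12345678) == 0x78563412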
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10498
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
util.py
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from __future__ import annotations # Allow subscripting Popen
from dataclasses import dataclass, replace
from datetime import timedelta
from difflib import get_close_matches
from enum import Enum
from functools import reduce
from inspect import getfile
from math import ceil, floor, inf, isclose, isnan
from operator import mul
import os
from os import kill, name as os_name
from os.path import splitext
from pathlib import Path
from platform import node
from signal import signal, SIGINT
from subprocess import DEVNULL, PIPE, Popen, run
from stat import S_IREAD, S_IWRITE, S_IRUSR, S_IWUSR, S_IRGRP, S_IWGRP, S_IROTH, S_IWOTH
from statistics import median, StatisticsError
from sys import argv
from threading import Event, Thread
from time import sleep, time
from typing import Any, Callable, cast, Iterable, List, Mapping, Optional, Sequence, Union
from xml.etree.ElementTree import Element, parse as parse_xml
from psutil import process_iter
from result import Err, Ok, Result
from .collection_util import add, find, identity, is_empty, min_max_float
from .option import option_or
from .type_utils import check_cast, T, U, V, with_slots
def remove_str_start(s: str, start: str) -> str:
assert s.startswith(start), f"Expected {s} to start with {repr(start)}"
return s[len(start) :]
def remove_str_end(s: str, end: str) -> str:
assert s.endswith(end), f"Expected {s} to end with {end}"
return s[: -len(end)]
def remove_str_start_end(s: str, start: str, end: str) -> str:
return remove_str_end(remove_str_start(s, start), end)
def try_remove_str_start(s: str, start: str) -> Optional[str]:
return remove_str_start(s, start) if s.startswith(start) else None
def try_remove_str_end(s: str, end: str) -> Optional[str]:
return remove_str_end(s, end) if s.endswith(end) else None
def remove_char(s: str, char: str) -> str:
return s.translate(str.maketrans("", "", char))
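# Editor's illustrative sketch (not part of the original utility module): small checks
# of the string helpers above.
def _example_string_helpers() -> None:
    assert remove_str_start("prefix_value", "prefix_") == "value"
    assert try_remove_str_end("trace.etl", ".etl") == "trace"
    assert remove_char("1,000,000", ",") == "1000000"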
def ensure_empty_dir(dir_path: Path) -> None:
ensure_dir(dir_path)
clear_dir(dir_path)
def unlink_if_exists(path: Path) -> None:
if path.exists():
path.unlink()
def clear_dir(dir_path: Path) -> None:
tries = 1
while tries > 0:
try:
# shutil.rmtree fails: github.com/hashdist/hashdist/issues/113#issuecomment-25374977
# TODO: avoid str(path)
for sub in dir_path.iterdir():
if not sub.is_dir():
sub.unlink()
for sub in dir_path.iterdir():
assert sub.is_dir()
clear_dir(sub)
sub.rmdir()
except OSError as e:
tries -= 1
if tries <= 0 or "The directory is not empty" not in e.strerror:
raise
sleep(1)
else:
break
def ensure_dir(dir_path: Path) -> None:
if not dir_path.exists():
assert dir_path.parent != dir_path
ensure_dir(dir_path.parent)
dir_path.mkdir()
def get_factor_diff(old: float, new: float) -> float:
if old == 0:
return 0 if new == 0 else inf if new > 0 else -inf
else:
return (new - old) / old
def get_max_factor_diff(values: Iterable[float]) -> Optional[float]:
mm = min_max_float(values)
return None if mm is None else get_factor_diff(*mm.to_pair())
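# Editor's illustrative sketch (not part of the original utility module): get_factor_diff
# expresses relative change, so a move from 10 to 15 is a +0.5 (50%) difference.
def _example_get_factor_diff() -> None:
    assert get_factor_diff(10.0, 15.0) == 0.5
    assert get_factor_diff(0.0, 0.0) == 0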
def product(values: Sequence[float]) -> float:
return reduce(mul, values)
def geometric_mean(values: Sequence[float]) -> float:
# Geometric mean only works for positive values
assert all(v > 0 for v in values)
# 'pow' returns 'Any', this has caused me problems in the past
return check_cast(float, pow(product(values), 1.0 / len(values)))
def assert_is_percent(p: float) -> float:
    assert 0 <= p <= 100, f"Expected a percentage (0-100), got {p}"
    return p
def get_percent(f: float) -> float:
return f * 100
def percent_to_fraction(p: float) -> float:
return p / 100
def float_to_str(f: float) -> str:
if f == 0:
return "0"
elif isnan(f):
return "NaN"
else:
def get_fmt() -> str:
a = abs(f)
if 0.001 <= a < 10000:
if a < 0.01:
return "%.5f"
elif a < 0.1:
return "%.4f"
elif a < 1:
return "%.3f"
elif a < 10:
return "%.2f"
elif a < 100:
return "%.1f"
else:
return "%.0f"
else:
return "%.2e"
res = get_fmt() % f
assert isclose(float(res), f, rel_tol=5e-3)
return res
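# Editor's illustrative sketch (not part of the original utility module): float_to_str
# picks a precision based on the magnitude of the value.
def _example_float_to_str() -> None:
    assert float_to_str(0.0) == "0"
    assert float_to_str(3.14159) == "3.14"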
def float_to_str_smaller(f: float) -> str:
if f == 0:
return "0"
elif isnan(f):
return "NaN"
else:
def get_fmt() -> str:
a = abs(f)
if 0.01 <= a < 1000:
if a < 0.1:
return "%.3f"
elif a < 1:
return "%.2f"
elif a < 10:
return "%.1f"
else:
return "%.0f"
else:
return "%.1e"
res = get_fmt() % f
assert isclose(float(res), f, rel_tol=5e-2)
return res
def _assert_exists(path: Path) -> Path:
assert path.exists(), f"Could not find {path}"
return path
def assert_file_exists(path: Path) -> Path:
_assert_exists(path)
assert path.is_file(), f"{path} is not a file"
return path
def assert_dir_exists(path: Path) -> Path:
_assert_exists(path)
assert path.is_dir(), f"{path} is not a directory"
return path
def make_absolute_path(path: Path) -> Path:
if path.is_absolute():
return path
else:
return Path.cwd() / path
def get_existing_absolute_path(path: object, message: Optional[Callable[[], str]] = None) -> Path:
assert isinstance(path, str)
p = Path(path)
assert p.is_absolute(), f"Path {path} should be absolute" if message is None else message()
return _assert_exists(p)
def get_existing_absolute_file_path(
path: object, message: Optional[Callable[[], str]] = None
) -> Path:
p = get_existing_absolute_path(path, message)
assert p.is_file(), f"Path {p} exists, but is not a file"
return p
def stdev_frac(stdv: float, avg: float) -> float:
if avg == 0.0:
return 0.0 if stdv == 0.0 else 1.0
else:
return stdv / avg
def os_is_windows() -> bool:
return {OS.posix: False, OS.windows: True}[get_os()]
class OS(Enum):
posix = 0
windows = 1
def get_os() -> OS:
return {"nt": OS.windows, "posix": OS.posix}[os_name]
@with_slots
@dataclass(frozen=True)
class ExecArgs:
cmd: Sequence[str]
cwd: Optional[Path] = None
env: Optional[Mapping[str, str]] = None
# Don't print the command before running
quiet_print: bool = False
# Ignore print to stdout
quiet_stdout: bool = False
# Ignore print to stderr
quiet_stderr: bool = False
def print(self) -> None:
if not self.quiet_print:
print(self)
def __str__(self) -> str:
s = " ".join(self.cmd)
if self.cwd is not None:
s += f" (cwd {self.cwd})"
# printing env is too verbose
return s
def args_with_cmd(a: ExecArgs, cmd: Sequence[str]) -> ExecArgs:
# Note: replace is not type-safe, so putting this near the definition of cmd
return replace(a, cmd=cmd)
AnyPopen = Union["Popen[str]", "Popen[bytes]"]
def is_process_alive(process: AnyPopen) -> bool:
return process.poll() is None
class ExecError(Exception):
pass
def _call_and_allow_interrupts(args: ExecArgs) -> timedelta:
start_time_seconds = time()
process = Popen(
args.cmd,
cwd=args.cwd,
env=args.env,
stdout=DEVNULL if args.quiet_stdout else None,
stderr=DEVNULL if args.quiet_stderr else None,
)
def handler(sig: int, _: Any) -> None: # TODO: `_: FrameType`
process.send_signal(sig)
raise KeyboardInterrupt
signal(SIGINT, handler)
exit_code = process.wait()
if exit_code != 0:
quiet_warning = " (Try running without 'quiet_stderr')" if args.quiet_stderr else ""
raise ExecError(f"Process {args.cmd} failed with exit code {exit_code}{quiet_warning}")
return timedelta(seconds=time() - start_time_seconds)
def exec_cmd(args: ExecArgs) -> timedelta:
args.print()
return _call_and_allow_interrupts(args)
@with_slots
@dataclass(frozen=True)
class BenchmarkRunErrorInfo:
name: str
iteration_num: int
message: str
trace: List[str]
def print(self) -> None:
print(
f"- Benchmark: '{self.name}' -\n"
f"Iteration: {self.iteration_num}\n"
f"Error Message: {self.message}\n"
f"\nStack Trace:\n{self.__rebuild_trace()}\n"
)
def __rebuild_trace(self) -> str:
return ''.join(self.trace)
@with_slots
@dataclass(frozen=True)
class ConfigRunErrorInfo:
name: str
benchmarks_run: BenchmarkErrorList
def print(self) -> None:
print(f"=== Configuration '{self.name}' ===\n")
for bench in self.benchmarks_run:
bench.print()
def add_benchmark(self, new_bench: BenchmarkRunErrorInfo) -> None:
self.benchmarks_run.append(new_bench)
@with_slots
@dataclass(frozen=True)
class CoreRunErrorInfo:
name: str
configs_run: ConfigurationErrorMap
def print(self) -> None:
print(f"===== Core '{self.name}' =====\n")
for config in self.configs_run.values():
config.print()
def add_config(self, new_config: ConfigRunErrorInfo) -> None:
add(self.configs_run, new_config.name, new_config)
RunErrorMap = Mapping[str, CoreRunErrorInfo]
ConfigurationErrorMap = Mapping[str, ConfigRunErrorInfo]
BenchmarkErrorList = List[BenchmarkRunErrorInfo]
def add_new_error(
run_errors: RunErrorMap,
core_name: str,
config_name: str,
bench_name: str,
iteration_num: int,
message: str,
trace: List[str]
) -> None:
if core_name not in run_errors:
bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
config_dict = {config_name : ConfigRunErrorInfo(config_name, bench_list)}
add(run_errors, core_name, CoreRunErrorInfo(core_name, config_dict))
else:
core_info = run_errors[core_name]
if config_name not in core_info.configs_run:
bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
core_info.add_config(ConfigRunErrorInfo(config_name, bench_list))
else:
config_info = core_info.configs_run[config_name]
config_info.add_benchmark(BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace))
@with_slots
@dataclass(frozen=True)
class WaitOnProcessResult:
stdout: str
# None if timed out
time_taken: Optional[timedelta]
def exec_start(args: ExecArgs, pipe_stdout: bool, pipe_stdin: bool = False) -> Popen[str]:
args.print()
assert not (args.quiet_stdout and pipe_stdout)
return Popen(
args.cmd,
env=args.env,
cwd=None if args.cwd is None else str(args.cwd),
stdin=PIPE if pipe_stdin else None,
stdout=DEVNULL if args.quiet_stdout else PIPE if pipe_stdout else None,
text=True,
)
def wait_on_process_with_timeout(
process: Popen[str], start_time_seconds: float, timeout_seconds: float
) -> WaitOnProcessResult:
assert is_process_alive(process)
done = Event()
killed = False
def process_kill_function() -> None:
nonlocal killed
is_done = done.wait(timeout=timeout_seconds)
if not is_done and is_process_alive(process):
print(f"Process timed out after {timeout_seconds} seconds! Sending SIGINT")
# process.send_signal(SIGINT) # This causes ValueError: Unsupported signal: 2
kill_process(process, time_allowed_seconds=1)
killed = True
process_killer = Thread(target=process_kill_function)
process_killer.start()
stdout, stderr = process.communicate()
assert stderr is None
returncode = process.wait()
end_time_seconds = time()
# If the process exited normally early, process_kill_function can exit.
# (If it was killed, this will have no effect)
done.set()
process_killer.join()
assert returncode == process.returncode
assert killed or process.returncode == 0, f"Process failed with code {process.returncode}"
return WaitOnProcessResult(
stdout=stdout,
time_taken=None if killed else timedelta(seconds=(end_time_seconds - start_time_seconds)),
)
def kill_process(process: AnyPopen, time_allowed_seconds: float) -> None:
assert is_process_alive(process)
kill(process.pid, SIGINT)
start_time_seconds = time()
while is_process_alive(process):
sleep(1)
if (time() - start_time_seconds) > time_allowed_seconds:
print(
f"Process '{check_cast(str, process.args)}' refused to shut down normally. "
+ "Trying again without asking nicely."
)
process.kill()
break
assert not is_process_alive(process)
class ExecutableNotFoundException(Exception):
def __init__(self, path: Path):
self.path = path
super().__init__(f"Cannot find {path}")
@with_slots
@dataclass(frozen=True)
class OutputAndExitCode:
stdout: str
exit_code: int
def exec_and_get_output_and_exit_code(args: ExecArgs) -> OutputAndExitCode:
args.print()
# These arguments don't apply here, should have their default values
assert args.quiet_stdout is False and args.quiet_stderr is False
try:
r = run(args.cmd, stdout=PIPE, cwd=args.cwd, env=args.env, check=False)
except FileNotFoundError:
raise ExecutableNotFoundException(Path(args.cmd[0])) from None
except NotADirectoryError:
raise Exception(f"Invalid cwd: {args.cwd}") from None
return OutputAndExitCode(decode_stdout(r.stdout), r.returncode)
def exec_and_get_output(args: ExecArgs, expect_exit_code: Optional[int] = None) -> str:
expected_exit_code = option_or(expect_exit_code, 0)
res = exec_and_get_output_and_exit_code(args)
assert (
res.exit_code == expected_exit_code
), f"Returned with code {res.exit_code}, expected {expected_exit_code}"
return res.stdout
@with_slots
@dataclass(frozen=True)
class ProcessResult:
exit_code: int
stdout: str
stderr: str
def exec_and_get_result(args: ExecArgs) -> ProcessResult:
args.print()
# These arguments don't apply here, should have their default values
assert args.quiet_stdout is False and args.quiet_stderr is False
try:
r = run(args.cmd, stdout=PIPE, stderr=PIPE, cwd=args.cwd, env=args.env, check=False)
except FileNotFoundError:
raise Exception(f"Cannot find {args.cmd[0]}") from None
return ProcessResult(
exit_code=r.returncode, stdout=decode_stdout(r.stdout), stderr=decode_stdout(r.stderr)
)
def decode_stdout(stdout: bytes) -> str:
# Microsoft trademark confuses python
stdout = stdout.replace(b"\xae", b"")
return stdout.decode("utf-8").strip().replace("\r", "")
def exec_and_expect_output(args: ExecArgs, expected_output: str, err: str) -> None:
output = exec_and_get_output(args)
if output != expected_output:
print("actual:", repr(output))
print("expect:", repr(expected_output))
raise Exception(err)
_BYTES_PER_KB: int = 2 ** 10
_BYTES_PER_MB: int = 2 ** 20
_BYTES_PER_GB: int = 2 ** 30
def bytes_to_kb(n_bytes: Union[int, float]) -> float:
return n_bytes / _BYTES_PER_KB
def bytes_to_mb(n_bytes: Union[int, float]) -> float:
return n_bytes / _BYTES_PER_MB
def bytes_to_gb(n_bytes: Union[int, float]) -> float:
return n_bytes / _BYTES_PER_GB
def kb_to_bytes(kb: float) -> int:
return round(kb * _BYTES_PER_KB)
def mb_to_bytes(mb: float) -> int:
return round(mb * _BYTES_PER_MB)
def gb_to_bytes(gb: float) -> int:
return round(gb * _BYTES_PER_GB)
def kb_to_mb(kb: float) -> float:
return bytes_to_mb(kb_to_bytes(kb))
def mb_to_gb(mb: float) -> float:
return bytes_to_gb(mb_to_bytes(mb))
def gb_to_mb(gb: float) -> float:
return bytes_to_mb(gb_to_bytes(gb))
MSECS_PER_SECOND = 1000
USECS_PER_SECOND = 1_000_000
def show_size_bytes(n_bytes: float) -> str:
return show_in_units(
n_bytes,
(Unit(_BYTES_PER_GB, "GB"), Unit(_BYTES_PER_MB, "MB"), Unit(_BYTES_PER_KB, "KB")),
Unit(1, "bytes"),
)
@with_slots
@dataclass(frozen=True)
class Unit:
amount: float
name: str
def show_in_units(amount: float, units: Sequence[Unit], base_unit: Unit) -> str:
# Find a unit where this is >= 1 of it
unit = option_or(find(lambda u: abs(amount) >= u.amount, units), base_unit)
amount_in_units = (
str(amount) if unit.amount == 1 and amount % 1 == 0 else "%.2f" % (amount / unit.amount)
)
return amount_in_units + f" {unit.name}"
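# Editor's illustrative sketch (not part of the original utility module): show_size_bytes
# renders a byte count using the largest unit that keeps the value at or above 1.
def _example_show_size_bytes() -> None:
    assert show_size_bytes(1536) == "1.50 KB"
    assert show_size_bytes(3 * _BYTES_PER_MB) == "3.00 MB"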
def seconds_to_msec(seconds: float) -> float:
return seconds * MSECS_PER_SECOND
def seconds_to_usec(seconds: float) -> float:
return seconds * USECS_PER_SECOND
def msec_to_seconds(msec: float) -> float:
return msec / MSECS_PER_SECOND
def mhz_to_ghz(mhz: float) -> float:
return mhz / 1000
# Python's os.walk won't work, because it takes Strings and not paths.
# Unfortunately `Path(str(path))` isn't the identity if path is `//machine/`. (Python bug?)
def walk_files_recursive(path: Path, filter_dir: Callable[[Path], bool]) -> Iterable[Path]:
for sub in path.iterdir():
if sub.is_dir():
if filter_dir(sub):
for x in walk_files_recursive(sub, filter_dir):
yield x
else:
yield sub
def get_hostname() -> str:
return node()
# TODO:MOVE
def assert_admin() -> None:
if not is_admin():
raise Exception(
"PerfView requires you to be an administrator"
if os_is_windows()
else "cgcreate requires you to be a super user"
)
def is_admin() -> bool:
if os_is_windows():
# Do this import lazily as it is only available on Windows
from win32com.shell.shell import IsUserAnAdmin # pylint:disable=import-outside-toplevel
return IsUserAnAdmin()
else:
# Importing it this way since geteuid doesn't exist in windows and mypy complains there
geteuid = cast(Callable[[], int], getattr(os, "geteuid"))
return geteuid() == 0
def get_extension(path: Path) -> str:
return splitext(path.name)[1]
def add_extension(p: Path, ext: str) -> Path:
return p.parent / f"{p.name}.{ext}"
def remove_extension(p: Path) -> Path:
return p.parent / splitext(p.name)[0]
def change_extension(p: Path, ext: str) -> Path:
return add_extension(remove_extension(p), ext)
def get_or_did_you_mean(mapping: Mapping[str, V], key: str, name: str) -> V:
try:
return mapping[key]
except KeyError:
raise Exception(did_you_mean(tuple(mapping.keys()), key, name)) from None
def did_you_mean(
choices: Iterable[str], choice: str, name: str, show_choice: Callable[[str], str] = identity
) -> str:
assert choice not in choices
# Mypy has the return type of get_close_matches wrong?
close = check_cast(Sequence[str], get_close_matches(choice, choices)) # type: ignore
if is_empty(close):
choices = tuple(choices)
if len(choices) < 20:
return f"Bad {name} {show_choice(choice)}. Available: {tuple(choices)}"
else:
return f"Bad {name} {show_choice(choice)}."
elif len(close) == 1:
return f"Bad {name} {show_choice(choice)}. Did you mean {show_choice(close[0])}?"
else:
close_str = "\n".join(tuple(show_choice(c) for c in close))
return f"Bad {name} {show_choice(choice)}. Did you mean one of:\n{close_str}"
def hex_no_0x(i: int) -> str:
return remove_str_start(hex(i), "0x")
def try_parse_single_tag_from_xml_document(path: Path, tag_name: str) -> Optional[str]:
assert tag_name.startswith("{"), "Should start with schema"
root = parse_xml(str(path)).getroot()
tags = tuple(_iter_tag_recursive(root, tag_name))
if is_empty(tags):
return None
else:
assert len(tags) == 1 # Should only be specified once
tag = tags[0]
return tag.text
def _iter_tag_recursive(e: Element, tag_name: str) -> Iterable[Element]:
for child in e:
if child.tag == tag_name:
yield child
else:
yield from _iter_tag_recursive(child, tag_name)
# Note: WeakKeyDictionary does not seem to work on CLR types. So using this hack instead.
def lazy_property(obj: T, f: Callable[[T], U], name: Optional[str] = None) -> U:
if name is None:
# Mypy expects f to be a "FunctionType", but I don't know how to import that
name = f"{getfile(cast(Any, f))}/{f.__name__}"
res: Optional[U] = getattr(obj, name, None)
if res is None:
res = f(obj)
assert res is not None
setattr(obj, name, res)
return res
def opt_max(i: Iterable[float]) -> Optional[float]:
try:
return max(i)
except ValueError:
return None
def opt_median(i: Iterable[float]) -> Optional[float]:
try:
return median(i)
except StatisticsError:
return None
# numpy has problems on ARM, so using this instead.
def get_percentile(values: Sequence[float], percent: float) -> float:
assert not is_empty(values)
assert 0.0 <= percent <= 100.0
sorted_values = sorted(values)
fraction = percent / 100.0
index_and_fraction = (len(values) - 1) * fraction
prev_index = floor(index_and_fraction)
next_index = ceil(index_and_fraction)
# The closer we are to 'next_index', the more 'next' should matter
next_factor = index_and_fraction - prev_index
prev_factor = 1.0 - next_factor
return sorted_values[prev_index] * prev_factor + sorted_values[next_index] * next_factor
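# Editor's illustrative sketch (not part of the original utility module): the percentile
# helper linearly interpolates between the two nearest sorted values.
def _example_get_percentile() -> None:
    assert get_percentile([1.0, 2.0, 3.0, 4.0], 50) == 2.5
    assert get_percentile([1.0, 2.0, 3.0, 4.0], 100) == 4.0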
def get_95th_percentile(values: Sequence[float]) -> Result[str, float]:
return Err("<no values>") if is_empty(values) else Ok(get_percentile(values, 95))
def update_file(path: Path, text: str) -> None:
if (not path.exists()) or path.read_text(encoding="utf-8") != text:
print(f"Updating {path}")
path.write_text(text, encoding="utf-8")
# When we run a test with 'sudo', we need to make sure other users can access the file
def give_user_permissions(file: Path) -> None:
flags = S_IREAD | S_IWRITE | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH
file.chmod(flags)
def check_no_processes(names: Sequence[str]) -> None:
assert all(name.islower() for name in names)
for proc in process_iter():
for name in names:
suggestion = {
OS.posix: f"pkill -f {name}",
OS.windows: f'Get-Process | Where-Object {{$_.Name -like "{name}"}} | Stop-Process',
}[get_os()]
assert name not in proc.name().lower(), (
f"'{name}' is already running\n" + f"Try: `{suggestion}`"
)
def get_command_line() -> str:
return f"> py {' '.join(argv)}"
|
botfront_anonymized_tracker_store.py
|
import logging
import jsonpickle
import requests
import time
import os
import re
from threading import Thread
from rasa.core.tracker_store import TrackerStore
from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
from .text_anonymizer import TextAnonymizer
from sgqlc.endpoint.http import HTTPEndpoint
import urllib.error
logger = logging.getLogger(__name__)
logging.getLogger("sgqlc.endpoint.http").setLevel(logging.WARNING)
jsonpickle.set_preferred_backend("json")
jsonpickle.set_encoder_options("json", ensure_ascii=False)
GET_TRACKER = """
query trackerStore(
$senderId: String!
$projectId: String!
$after: Int
$maxEvents: Int
) {
trackerStore(senderId: $senderId, projectId:$projectId, after:$after, maxEvents:$maxEvents) {
tracker
lastIndex
lastTimestamp
}
}
"""
INSERT_TRACKER = """
mutation insertTracker(
$senderId: String!
$projectId: String!
$tracker: Any
$env: Environment
) {
insertTrackerStore(senderId: $senderId, projectId:$projectId, tracker:$tracker, env: $env){
lastIndex
lastTimestamp
}
}
"""
UPDATE_TRACKER = """
mutation updateTracker(
$senderId: String!
$projectId: String!
$tracker: Any
$env: Environment
) {
updateTrackerStore(senderId: $senderId, projectId: $projectId, tracker: $tracker, env: $env){
lastIndex
lastTimestamp
}
}
"""
def _start_sweeper(tracker_store, break_time):
while True:
try:
tracker_store.sweep()
finally:
time.sleep(break_time)
class BotfrontAnonymizedTrackerStore(TrackerStore):
def __init__(self, domain, host, **kwargs):
self.project_id = os.environ.get("BF_PROJECT_ID")
self.tracker_persist_time = kwargs.get("tracker_persist_time", 3600)
self.test_tracker_persist_time = kwargs.get("test_tracker_persist_time", 240)
self.max_events = kwargs.get("max_events", 100)
self.trackers = {}
self.test_trackers = {}
        self.trackers_info = {}  # this structure keeps the last index and last timestamp of the events stored in the db for each tracker
self.sweeper = Thread(target=_start_sweeper, args=(self, 30))
self.sweeper.setDaemon(True)
self.sweeper.start()
api_key = os.environ.get("API_KEY")
headers = [{"Authorization": api_key}] if api_key else []
self.graphql_endpoint = HTTPEndpoint(host, *headers)
self.host = host
self.environment = os.environ.get("BOTFRONT_ENV", "development")
self.botfront_test_regex = re.compile('^bot_regression_test_')
self.text_anonymizer = TextAnonymizer()
super(BotfrontAnonymizedTrackerStore, self).__init__(domain, event_broker=kwargs.get("event_broker"))
logger.debug("BotfrontAnonymizedTrackerStore tracker store created")
def _graphql_query(self, query, params):
try:
response = self.graphql_endpoint(query, params)
if response.get("errors"):
raise urllib.error.URLError(
", ".join([e.get("message") for e in response.get("errors")])
)
return response.get("data")
except urllib.error.URLError as e:
message = e.reason
logger.error(
f"Something went wrong getting the tracker from {self.host}: {message}"
)
return {}
def _fetch_tracker(self, sender_id, lastIndex):
data = self._graphql_query(
GET_TRACKER,
{
"senderId": sender_id,
"projectId": self.project_id,
"after": lastIndex,
"maxEvents": self.max_events,
},
)
return data.get("trackerStore")
def _insert_tracker_gql(self, sender_id, tracker):
data = self._graphql_query(
INSERT_TRACKER,
{
"senderId": sender_id,
"projectId": self.project_id,
"tracker": tracker,
"env": self.environment,
},
)
return data.get("insertTrackerStore")
def _update_tracker_gql(self, sender_id, tracker):
data = self._graphql_query(
UPDATE_TRACKER,
{
"senderId": sender_id,
"projectId": self.project_id,
"tracker": tracker,
"env": self.environment,
},
)
return data.get("updateTrackerStore")
def _get_last_index(self, sender_id):
info = self.trackers_info.get(sender_id, -1)
if info == -1:
return info
elif info.get("last_index") is None:
return -1
else:
return info.get("last_index")
def _get_last_timestamp(self, sender_id):
info = self.trackers_info.get(sender_id, 0)
if info == 0:
return info
elif info.get("last_timestamp") is None:
return 0
else:
return info.get("last_timestamp")
def _store_tracker_info(self, sender_id, tracker_info):
if tracker_info is not None:
self.trackers_info[sender_id] = {
"last_index": tracker_info["lastIndex"],
"last_timestamp": tracker_info["lastTimestamp"],
}
def _anonymize_tracker(self, serialized_tracker: dict) -> dict:
serialized_tracker["latest_message"]["text"] = self.text_anonymizer.anonymize_text(serialized_tracker["latest_message"]["text"])
for event in serialized_tracker["events"]:
if event["event"] == "user":
event["text"] = self.text_anonymizer.anonymize_text(event["text"])
event["parse_data"]["text"] = self.text_anonymizer.anonymize_text(event["parse_data"]["text"])
return serialized_tracker
def save(self, canonical_tracker):
serialized_tracker = self._serialize_tracker_to_dict(canonical_tracker)
serialized_tracker = self._anonymize_tracker(serialized_tracker)
sender_id = canonical_tracker.sender_id
if self.botfront_test_regex.match(sender_id):
self.test_trackers[sender_id] = canonical_tracker
return serialized_tracker["events"]
# call the event broker below the test exit so that the logs aren't filled with testing data
if self.event_broker:
self.stream_events(canonical_tracker)
# Fetch here just in case retrieve wasn't called first
tracker = self.trackers.get(sender_id)
        if tracker is None:  # the tracker does not exist locally (first save)
updated_info = self._insert_tracker_gql(sender_id, serialized_tracker)
self.trackers[sender_id] = serialized_tracker
# update the last index and last time stamp for future uses
self._store_tracker_info(sender_id, updated_info)
return serialized_tracker["events"]
        else:  # the tracker exists locally
# Insert only the new examples
last_timestamp = self._get_last_timestamp(sender_id)
new_events = list(
filter(
lambda x: x["timestamp"] > last_timestamp,
serialized_tracker["events"],
)
)
tracker_shallow_copy = {key: val for key, val in serialized_tracker.items()}
tracker_shallow_copy["events"] = new_events
# only send the new events to the remote tracker
updated_info = self._update_tracker_gql(sender_id, tracker_shallow_copy)
# update the last index and last time stamp for future uses
self._store_tracker_info(sender_id, updated_info)
self.trackers[sender_id] = serialized_tracker
return serialized_tracker["events"]
def _convert_tracker(self, sender_id, tracker):
if self.domain:
return DialogueStateTracker.from_dict(
sender_id, tracker["events"], self.domain.slots
)
else:
logger.warning(
"Can't recreate tracker from mongo storage "
"because no domain is set. Returning `None` "
"instead."
)
return None
def _update_tracker(self, sender_id, remote_tracker):
old_tracker = self.trackers.get(sender_id)
if old_tracker is not None:
events = old_tracker.get("events")
remote_events = remote_tracker.get("events")
            # if we receive max_events events it means we skipped some events,
            # since only the last max_events are fetched; in that case, replace the local copy with the remote data
if len(remote_events) == self.max_events:
new_events = remote_events
else:
new_events = [*events, *remote_events]
new_tracker = {**old_tracker, **remote_tracker}
new_tracker["events"] = new_events
self.trackers[sender_id] = new_tracker
return new_tracker
else:
self.trackers[sender_id] = remote_tracker
return remote_tracker
def retrieve(self, sender_id):
if self.botfront_test_regex.match(sender_id):
return self.test_trackers.get(sender_id)
last_index = self._get_last_index(sender_id)
        # retrieve all new info since the last sync (given by last_index)
new_tracker_info = self._fetch_tracker(sender_id, last_index)
current_tracker = self.trackers.get(sender_id)
        # do not change the order of these ifs,
        # otherwise you will get synchronization issues when working with multiple rasa instances
# the tracker exist on the remote and may exist locally
if new_tracker_info is not None:
self._store_tracker_info(sender_id, new_tracker_info)
tracker = self._update_tracker(sender_id, new_tracker_info.get("tracker"))
return self._convert_tracker(sender_id, tracker)
        # the tracker does not exist yet
if current_tracker is None:
return None
        # the tracker exists locally and there is no new info
return self._convert_tracker(sender_id, current_tracker)
def cleanup_trackers(self, trackers, persist_time):
for key in list(
trackers.keys()
):
            ## wrapped in a try block so that an exception does not stop the sweep mechanism
try:
tracker = trackers.get(key)
max_event_time = time.time() - persist_time
latest_event = float("inf")
try:
latest_event = tracker.latest_message.timestamp
except:
latest_event = tracker.get("latest_event_time", float("inf"))
pass
if latest_event < max_event_time:
logger.debug("SWEEPER: Removing botfront test tracker {}".format(key))
if key in trackers:
del trackers[key]
if key in self.trackers_info:
del self.trackers_info[key]
except Exception as e:
print(e)
pass
def sweep(self):
self.cleanup_trackers(self.test_trackers, self.test_tracker_persist_time)
self.cleanup_trackers(self.trackers, self.tracker_persist_time)
@staticmethod
def _serialize_tracker_to_dict(canonical_tracker):
return canonical_tracker.current_state(EventVerbosity.ALL)
|
solve_all_sudokus.py
|
#!/usr/bin/env python3
from grid import SudokuGrid
from solver import SudokuSolver
import os.path
import time
import multiprocessing
def solve_all(running_times):
for l in range(1, 245):
        g = SudokuGrid.from_file(os.path.join(os.path.dirname(__file__), "..", "sudoku_db.txt"), l)
start = time.monotonic()
solver = SudokuSolver(g)
solver.solve()
running_times.append(1000 * (time.monotonic() - start))
print("\r[{: <40}] ({:.0%})".format('='*int(40 * l / 244), l / 244), end='')
if __name__ == "__main__":
manager = multiprocessing.Manager()
running_times = manager.list()
p = multiprocessing.Process(target=solve_all, args=(running_times,))
print("Starting solver on all 244 instances in 'sudoku_db.txt' with a time-out of 5min...")
p.start()
p.join(300)
if p.is_alive():
print("\nTime-out!")
p.terminate()
p.join()
else:
print()
n_runs = len(running_times)
print("Number of completed run: {}".format(n_runs))
print("Running times statistics: min = {:.3f}ms, average = {:.3f}ms, max = {:.3f}ms".format(
min(running_times), sum(running_times) / n_runs, max(running_times)))
|
__init__.py
|
import subprocess,threading,sys,os,config
import json
def popenAndCall(args, shell=True,stdout=sys.stdout,cwd=os.getcwd(),onExit=None):
"""
Runs the given args in a subprocess.Popen, and then calls the function
onExit when the subprocess completes.
    onExit is a callable object, and args is the command (a string or list/tuple)
    that would be given to subprocess.Popen.
"""
def runInThread(onExit):
proc = subprocess.Popen(args,shell=shell,stdout=stdout,cwd=cwd)
proc.wait()
if onExit is not None:
onExit()
return
    thread = threading.Thread(target=runInThread, args=(onExit,))
thread.start()
# returns immediately after the thread starts
return thread
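# Editor's illustrative sketch (not part of the original module): the command string and
# callback below are placeholders showing how the onExit hook is used.
def _example_popen_and_call():
    popenAndCall("echo build finished", onExit=lambda: print("callback fired"))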
def compile_tex(tex_files:list,root_path):
output_path = os.path.join(root_path)
log_path = os.path.join(root_path,"log")
os.makedirs(output_path,exist_ok=True)
os.makedirs(log_path,exist_ok=True)
for tex_file in tex_files:
_,fname = os.path.split(tex_file)
fpre,_ = os.path.splitext(fname)
open(os.path.join(output_path,f"{fpre}.flag"),"w").close()
params = ["texliveonfly",]
relpath = os.path.relpath(tex_file,output_path)
params.append(relpath)
params.extend(["-r","True"])
shell = " ".join(params)
flog = open(os.path.join(log_path,f"{fpre}.txt"),"w",encoding="utf-8")
subprocess.Popen(shell, shell=True,stdout=flog,cwd=output_path)
def get_flist2(tid):
log_file_path = os.path.join(config.UPLOAD_ROOT_PATH, f"{tid}","build.log")
jstr = json.load(open(log_file_path,"r"))
pdfs = []
errfs = []
for fpath,res in jstr.items():
path,fname = os.path.split(fpath)
fpre,_ = os.path.splitext(fname)
if res == 1:
errlog_file = os.path.join(config.UPLOAD_ROOT_PATH,f"{tid}",config.LOG_DIR_NAME,f"{fpre}.txt")
errfs.append(errlog_file)
if os.path.exists(fpath):
pdfs.append(fpath)
else:
pdfs.append(fpath)
pdfs = [os.path.relpath(f,config.UPLOAD_ROOT_PATH) for f in pdfs]
errfs = [os.path.relpath(f,config.UPLOAD_ROOT_PATH) for f in errfs]
return pdfs,errfs
def get_flist(tid):
root_path = os.path.join(config.UPLOAD_ROOT_PATH, f"{tid}")
output_path = os.path.join(root_path, config.BUILD_DIR_NAME)
log_path = os.path.join(root_path,config.LOG_DIR_NAME)
fs = os.listdir(output_path)
pdfs = []
errfs = []
for f in fs:
if f.endswith("pdf"):
pdfs.append(f)
for f in fs:
if f.endswith("res"):
fpre,_ = os.path.splitext(f)
pdf = f"{fpre}.pdf"
            if pdf not in pdfs:  # compilation failed
logf = os.path.join(log_path,f"{fpre}.txt")
errfs.append(logf)
pdfs = [os.path.join(output_path,f) for f in pdfs]
errfs = [os.path.join(log_path,f) for f in errfs]
pdfs = [os.path.relpath(f,config.UPLOAD_ROOT_PATH) for f in pdfs]
errfs = [os.path.relpath(f,config.UPLOAD_ROOT_PATH) for f in errfs]
return pdfs,errfs
def check_compile(tid):
flag_path = os.path.join(config.UPLOAD_ROOT_PATH,f"{tid}","1")
return os.path.exists(flag_path)
def convert_markdown(file,output):
from marktex.texrender.toTex import MarkTex
doc = MarkTex.convert_file(file,output_dir=output)
path,fname = os.path.split(file)
fpre,_ = os.path.splitext(fname)
doc.generate_tex(fpre)
texfpath = os.path.join(path,f"{fpre}.tex")
return texfpath
|
main.py
|
"""
Main
Program entrypoint.
Can be run with `python main.py`
Or through the CLI with `scly start` after installing.
"""
from simplesensor.shared.threadsafeLogger import ThreadsafeLogger
from simplesensor.loggingEngine import LoggingEngine
from simplesensor.shared.message import Message
from importlib import import_module
import multiprocessing as mp
from simplesensor import mainConfigLoader as mainConfigLoader
from threading import Thread
import simplesensor
import time
import sys
import os
import os.path
from .version import __version__
# Dict of processes, threadsafe queues to handle
# Keys will be the module names
processes = {}
queues = {}
# Define a single inbound message queue for each of the module types.
# These are shared among modules since the messages should come in order.
# The main process (main.py) handles forwarding inbound messages to the correct recipient(s).
queues['cpInbound']=mp.Queue()
queues['comInbound']=mp.Queue()
# Logging queue setup
queues['logging'] = mp.Queue()
logger = ThreadsafeLogger(queues['logging'], "main")
# Config
baseConfig = mainConfigLoader.load(queues['logging'], "main")
# append main version onto the base config to pass to collection and communication child modules
baseConfig['ss_version'] = __version__
# Logging output engine
loggingEngine = LoggingEngine(loggingQueue=queues['logging'], config=baseConfig)
loggingEngine.start()
# Config
# baseConfig = mainConfigLoader.load(queues['logging'], "main")
# WIP - new config loader
# configLoader = ConfigLoader('main',
# os.path.join(os.path.dirname(__file__), 'config'),
# 'base.conf',
# queues['logging']
# )
# baseConfig = configLoader.load()
# del configLoader
_collectionModuleNames = baseConfig['CollectionModules']
_communicationModuleNames = baseConfig['CommunicationModules']
if len(_collectionModuleNames)==0 or len(_communicationModuleNames)==0:
    logger.warn('Without at least one of each communication and '
        + 'collection modules active, SimpleSensor does not do much. '
        + 'Use command `scly config --name base` to configure modules. '
        + 'Use command `scly install --name <some_branch> --type <communication/collection>` '
        + 'to install a module. See https://github.com/AdobeAtAdobe/SimpleSensor/blob/master/README.md '
        + 'for more details.')
_collectionModules = {}
_communicationModules = {}
def _find_getch():
""" Returns a getch function for the system in use. """
try:
import termios
except ImportError:
# Return Windows' getch
import msvcrt
return msvcrt.getch
# Create and return a getch that manipulates the tty
import tty
def _getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
return _getch
getch = _find_getch()
def process_input(toexit):
""" Process keystrokes, exit on esc key. """
ch = getch()
if ch == b'\x1b':
toexit.append(True)
# Set up input thread to watch for esc key
toExit=[]
inputThread = Thread(target=process_input, args=(toExit,))
inputThread.setDaemon(True)
inputThread.start()
# For each collection module, import, initialize
for moduleName in _collectionModuleNames:
try:
logger.debug('importing %s'%(moduleName))
_collectionModules[moduleName] = import_module('simplesensor.collection_modules.%s'%moduleName)
queues[moduleName] = {}
        # Each module has its own incoming message queue (outbound from main proc)
# This is for cases where messages should be handled by specific modules, not all.
queues[moduleName]['out'] = mp.Queue()
except Exception as e:
logger.error('Error importing %s: %s'%(moduleName, e))
# For each communication module, import, initialize, and create an in/out queue
for moduleName in _communicationModuleNames:
try:
logger.debug('importing %s'%(moduleName))
_communicationModules[moduleName] = import_module('simplesensor.communication_modules.%s'%moduleName)
queues[moduleName] = {}
queues[moduleName]['out'] = mp.Queue()
except Exception as e:
logger.error('Error importing %s: %s'%(moduleName, e))
alive = True
def send_message(message):
""" Put outbound message onto queues of all active communication channel threads,
or the modules defined in the recipients field of the message.
Always send string messages, as they are control messages like 'SHUTDOWN'.
"""
if type(message.recipients) == str:
recipients = [message.recipients]
else:
recipients = message.recipients
if recipients in [['communication_modules'],['all'],['local_only']]:
for moduleName in _communicationModuleNames:
            if recipients == ['local_only'] and not processes[moduleName].low_cost(): break
try:
queues[moduleName]['out'].put_nowait(message)
if baseConfig['TestMode']:
                    if os.name != 'posix':
logger.debug("%s queue size is %s"%(moduleName, queues[moduleName]['out'].qsize()))
except Exception as e:
logger.error('Error adding message to module %s queue: %s'%(moduleName, e))
else:
# Send to the set recipients only
for recipient in recipients:
try:
queues[recipient]['out'].put_nowait(message)
if baseConfig['TestMode']:
                    if os.name != 'posix':
logger.debug("%s queue size is %s"%(recipient, queues[recipient]['out'].qsize()))
except Exception as e:
logger.error('Error adding message to %s queue: %s'%(recipient, e))
if recipients in [['collection_modules'],['all'],['local_only']]:
for moduleName in _collectionModuleNames:
            if recipients == ['local_only'] and not processes[moduleName].low_cost(): break
try:
queues[moduleName]['out'].put_nowait(message)
if baseConfig['TestMode']:
                    if os.name != 'posix':
logger.debug("%s queue size is %s"%(moduleName, queues[moduleName]['out'].qsize()))
except Exception as e:
logger.error('Error adding message to module %s queue: %s'%(moduleName, e))
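# Editor's illustrative sketch (not part of the original module): targeting a single module
# by name; 'websocket_server' is a placeholder and must match a configured module.
def _example_send_message():
    send_message(Message(topic='status', sender_id='main', recipients=['websocket_server']))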
def load_communication_channels():
""" Create a process for each communication channel specified in base.conf """
for moduleName in _communicationModuleNames:
try:
logger.info('Loading communication module : %s'%moduleName)
proc = _communicationModules[moduleName].CommunicationModule(baseConfig,
queues[moduleName]['out'],
queues['comInbound'],
queues['logging'])
processes[moduleName] = proc
proc.start()
except Exception as e:
logger.error('Error importing %s: %s'%(moduleName, e))
def load_collection_points():
""" Create a new process for each collection point module specified in base.conf """
for moduleName in _collectionModuleNames:
try:
logger.info('Loading collection module : %s'%moduleName)
proc = _collectionModules[moduleName].CollectionModule(baseConfig,
queues[moduleName]['out'],
queues['cpInbound'],
queues['logging'])
processes[moduleName] = proc
proc.start()
except Exception as e:
print(e)
logger.error('Error importing %s: %s'%(moduleName, e))
def main():
""" Main control logic.
    Initiate all communication channels and collection points.
    Loop to monitor the inbound message queues,
    handing messages off to the other modules as they come in.
"""
logger.info('Loading communication channels')
load_communication_channels()
logger.info("Loading collection points")
load_collection_points()
while alive and not toExit:
# Listen to inbound message queues for messages
        if not queues['cpInbound'].empty():
            try:
                message = queues['cpInbound'].get(block=False)
if message is not None:
if message.topic.upper() == "SHUTDOWN":
logger.info("SHUTDOWN handled")
shutdown()
else:
send_message(message)
except Exception as e:
logger.error("Unable to read collection point queue : %s " %e)
        elif not queues['comInbound'].empty():
            try:
                message = queues['comInbound'].get(block=False)
if message is not None:
if message.topic.upper() == "SHUTDOWN":
logger.info("SHUTDOWN handled")
shutdown()
else:
send_message(message)
except Exception as e:
logger.error("Unable to read communication channel queue : %s " %e)
else:
time.sleep(.25)
shutdown()
def shutdown():
""" Send shutdown message to all communication channel threads
and collection points, join and exit them.
"""
logger.info("Shutting down main process")
# Send to communication methods
message = Message(topic='SHUTDOWN', sender_id='main', recipients='all')
send_message(message)
queues['logging'].put_nowait(message)
kill_processes()
alive = False
sys.exit(0)
def kill_processes():
""" Wait for each process to die until timeout is reached, then terminate. """
print('Killing processes...')
timeout = 2
for name, proc in processes.items():
        proc.join(timeout)  # don't block indefinitely; fall through to the is_alive() check below
p_sec = 0
for second in range(timeout):
if proc.is_alive():
time.sleep(1)
p_sec += 1
if p_sec >= timeout:
print('Terminating process %s - %s'%(name, proc))
proc.terminate()
def start():
""" Main entry point for running on cmd line. """
python_version = sys.version_info.major
if python_version == 3:
main()
else:
logger.error("You need to run Python version 3.x! Your trying to run this with major version %s" % python_version)
# Set multiprocessing start method
mp.set_start_method('fork')
"""
Get the current version number
"""
def version():
return __version__
if __name__ == '__main__':
""" Main entry point for running on cmd line. """
start()
|
dataset.py
|
"""Data fetching
"""
# MIT License
#
# Copyright (c) 2019 Yichun Shi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import math
import random
import shutil
from multiprocessing import Process, Queue
import h5py
import numpy as np
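# Filenames starting with 'P' denote photos; everything else is treated as a caricature.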
is_photo = lambda x: os.path.basename(x).startswith('P')
class DataClass(object):
def __init__(self, class_name, indices, label):
self.class_name = class_name
self.indices = np.array(indices)
self.label = label
return
def random_pc_pair(self):
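        # Pick one random photo index and one random caricature index from this class.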
photo_idx = np.random.permutation(self.photo_indices)[0]
caric_idx = np.random.permutation(self.caric_indices)[0]
return np.array([photo_idx, caric_idx])
class Dataset():
def __init__(self, path=None, prefix=None, isDebug=False):
self.DataClass = DataClass
self.num_classes = None
self.classes = None
self.images = None
self.labels = None
self.is_photo = None
self.idx2cls = None
self.batch_queue = None
self.batch_workers = None
self.isDebug = isDebug
if path is not None:
self.init_from_list(path, prefix)
def init_from_list(self, filename, prefix=None):
with open(filename, 'r') as f:
lines = f.readlines()
if self.isDebug:
lines = lines[0:100]
lines = [line.strip().split(' ') for line in lines]
        assert len(lines) > 0, \
            'List file is empty; expected lines of the form "fullpath(str) label(int)"'
images = [line[0] for line in lines]
if prefix is not None:
print('Adding prefix: {}'.format(prefix))
images = [os.path.join(prefix, img) for img in images]
if len(lines[0]) > 1:
labels = [int(line[1]) for line in lines]
else:
labels = [os.path.dirname(img) for img in images]
_, labels = np.unique(labels, return_inverse=True)
        self.images = np.array(images, dtype=object)
self.labels = np.array(labels, dtype=np.int32)
self.init_classes()
print('%d images of %d classes loaded' % (len(self.images), self.num_classes))
self.separate_photo_caricature()
def separate_photo_caricature(self):
self.is_photo = [is_photo(im) for im in self.images]
        self.is_photo = np.array(self.is_photo, dtype=bool)
for c in self.classes:
c.photo_indices = c.indices[self.is_photo[c.indices]]
c.caric_indices = c.indices[~self.is_photo[c.indices]]
print('{} photos {} caricatures'.format(self.is_photo.sum(), (~self.is_photo).sum()))
return
def init_classes(self):
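        # Group sample indices by label into DataClass objects and build the index-to-class lookup (idx2cls).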
dict_classes = {}
classes = []
        self.idx2cls = np.ndarray((len(self.labels),)).astype(object)
for i, label in enumerate(self.labels):
if not label in dict_classes:
dict_classes[label] = [i]
else:
dict_classes[label].append(i)
for label, indices in dict_classes.items():
classes.append(self.DataClass(str(label), indices, label))
self.idx2cls[indices] = classes[-1]
        self.classes = np.array(classes, dtype=object)
self.num_classes = len(classes)
def build_subset_from_indices(self, indices, new_labels=True):
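        # Build a new Dataset restricted to the given indices; new_labels=True re-maps labels to 0..K-1.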
subset = type(self)()
subset.images = self.images[indices]
subset.labels = self.labels[indices]
if new_labels:
_, subset.labels = np.unique(subset.labels, return_inverse=True)
subset.init_classes()
print('built subset: %d images of %d classes' % (len(subset.images), subset.num_classes))
return subset
# Data Loading
def get_batch(self, batch_size):
''' Get random pairs of photos and caricatures. '''
indices_batch = []
# Random photo-caricature pair
assert batch_size % 2 == 0
classes = np.random.permutation(self.classes)[:batch_size//2]
indices_batch = np.concatenate([c.random_pc_pair() for c in classes], axis=0)
batch = {}
if len(indices_batch) > 0:
batch['images'] = self.images[indices_batch]
batch['labels'] = self.labels[indices_batch]
if self.is_photo is not None:
batch['is_photo'] = self.is_photo[indices_batch]
return batch
    # Batch prefetching and preprocessing in background workers (multiprocessing, despite the num_threads name)
def start_batch_queue(self, batch_size, proc_func=None, maxsize=1, num_threads=3):
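        # Each of the num_threads workers is a separate multiprocessing.Process that keeps batch_queue filled.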
self.batch_queue = Queue(maxsize=maxsize)
def batch_queue_worker(seed):
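            # Seed NumPy per worker so each process draws different random batches.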
np.random.seed(seed)
while True:
batch = self.get_batch(batch_size)
if proc_func is not None:
batch['image_paths'] = batch['images']
batch['images'] = proc_func(batch['image_paths'])
self.batch_queue.put(batch)
self.batch_workers = []
for i in range(num_threads):
worker = Process(target=batch_queue_worker, args=(i,))
worker.daemon = True
worker.start()
self.batch_workers.append(worker)
def pop_batch_queue(self, timeout=60):
return self.batch_queue.get(block=True, timeout=timeout)
def release_queue(self):
if self.batch_queue is not None:
self.batch_queue.close()
if self.batch_workers is not None:
for w in self.batch_workers:
w.terminate()
del w
self.batch_workers = None
|
project_files_monitor_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import socket
import tempfile
import threading
import unittest
from unittest.mock import MagicMock, patch
from .. import json_rpc, project_files_monitor
from ..analysis_directory import UpdatedPaths
from ..json_rpc import Request, read_request
from ..project_files_monitor import MonitorException, ProjectFilesMonitor
from ..socket_connection import SocketConnection, SocketException
from ..tests.mocks import mock_configuration
class MonitorTest(unittest.TestCase):
@patch.object(SocketConnection, "connect")
@patch.object(json_rpc, "perform_handshake")
# pyre-fixme[56]: Argument `tools.pyre.client.project_files_monitor` to
# decorator factory `unittest.mock.patch.object` could not be resolved in a global
# scope.
@patch.object(project_files_monitor, "find_parent_directory_containing_file")
def test_subscriptions(
self,
find_parent_directory_containing_file,
perform_handshake,
_socket_connection,
) -> None:
find_parent_directory_containing_file.return_value = "/ROOT"
configuration = mock_configuration()
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = "/ROOT"
# no additional extensions
configuration.extensions = []
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
)
self.assertCountEqual(
subscription.subscription["expression"][2],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["match", "TARGETS"],
],
)
# additional extensions
configuration.get_valid_extensions = lambda: [".thrift", ".whl"]
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
)
self.assertCountEqual(
subscription.subscription["expression"][2],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["suffix", "whl"],
["match", "TARGETS"],
],
)
# no watchman root -> terminate
find_parent_directory_containing_file.return_value = None
self.assertRaises(
MonitorException,
ProjectFilesMonitor,
configuration,
".",
analysis_directory,
)
def test_bad_socket(self) -> None:
with tempfile.TemporaryDirectory() as root:
bad_socket_path = os.path.join(root, "bad.sock")
socket_connection = SocketConnection(bad_socket_path)
self.assertRaises(SocketException, socket_connection.connect)
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_socket_communication(self, _find_watchman_path) -> None:
# Create a "server" thread to complete the handshake
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
errors = []
with tempfile.TemporaryDirectory() as root:
socket_path = os.path.join(root, ".pyre", "server", "json_server.sock")
os.makedirs(os.path.dirname(socket_path))
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
outfile = connection.makefile(mode="wb")
infile = connection.makefile(mode="rb")
request = Request(
method="handshake/server", parameters={"version": "123"}
)
request.write(outfile)
response = read_request(infile)
if not response or response.method != "handshake/client":
errors.append("Client handshake malformed")
return
request = Request(method="handshake/socket_added")
request.write(outfile)
updated_message = read_request(infile)
if (
not updated_message
or updated_message.method != "updateFiles"
or not updated_message.parameters
or updated_message.parameters.get("files")
!= ["/ANALYSIS/a.py", "/ANALYSIS/subdir/b.py"]
):
errors.append("Update message malformed")
server_thread = threading.Thread(target=server)
server_thread.start()
configuration = mock_configuration(version_hash="123")
configuration.log_directory = root + "/.pyre"
configuration.extensions = []
analysis_directory = MagicMock()
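            # Fake analysis directory: report updated files under /ROOT as if they lived under /ANALYSIS.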
analysis_directory.process_updated_files.side_effect = (
lambda files: UpdatedPaths(
updated_paths=[file.replace("ROOT", "ANALYSIS") for file in files],
deleted_paths=[],
)
)
# only create the monitor once the socket is open
with socket_created_lock:
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._handle_response(
{"root": "/ROOT", "files": ["a.py", "subdir/b.py"]}
)
analysis_directory.process_updated_files.assert_called_once_with(
["/ROOT/a.py", "/ROOT/subdir/b.py"]
)
server_thread.join()
self.assertEqual(errors, [])
@patch.object(SocketConnection, "connect")
# pyre-fixme[56]: Argument `tools.pyre.client.json_rpc` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(json_rpc, "perform_handshake")
@patch.object(ProjectFilesMonitor, "_watchman_client")
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_files_cleaned_up(
self,
_find_watchman_path,
_watchman_client,
perform_handshake,
_socket_connection,
) -> None:
with tempfile.TemporaryDirectory() as root:
configuration = mock_configuration()
configuration.extensions = []
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._alive = False # never enter watchman loop
monitor._run()
monitor_folder = os.path.join(".pyre", "file_monitor")
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.lock"))
)
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.pid"))
)
# pyre-fixme[56]: Argument `os.path` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(os.path, "realpath")
def test_socket_connection(self, realpath) -> None:
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with tempfile.TemporaryDirectory() as root:
realpath.side_effect = lambda path: path.replace(
os.path.dirname(path), root # replace parent directories with tempdir
)
# Unix sockets have a limited length of ~100 characters, so the server uses
# symbolic links as a workaround. We need to properly translate these.
socket_link = os.path.join(
".pyre", "long_name" * 15, "server", "json_server.sock"
)
socket_path = os.path.join(root, "json_server.sock")
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
server_thread = threading.Thread(target=server)
server_thread.start()
with socket_created_lock:
SocketConnection(socket_link).connect()
server_thread.join()
|
test__xxsubinterpreters.py
|
from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import import_helper
from test.support import script_helper
interpreters = import_helper.import_module('_xxsubinterpreters')
##################################
# helpers
def _captured_script(script):
    r, w = os.pipe()
    indented = script.replace('\n', '\n                ')
    wrapped = dedent(f"""
        import contextlib
        with open({w}, 'w', encoding="utf-8") as spipe:
            with contextlib.redirect_stdout(spipe):
                {indented}
        """)
    return wrapped, open(r, encoding="utf-8")
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
def _wait_for_interp_to_run(interp, timeout=None):
    # bpo-37224: Running this test file in multiple processes will fail randomly.
    # The failure happens because the thread can't acquire the CPU to run the
    # subinterpreter earlier than the main thread when run in multiple processes.
if timeout is None:
timeout = support.SHORT_TIMEOUT
start_time = time.monotonic()
deadline = start_time + timeout
while not interpreters.is_running(interp):
if time.monotonic() > deadline:
raise RuntimeError('interp is not running')
time.sleep(0.010)
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# wait for "signal"
with open({r}, encoding="utf-8") as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
_wait_for_interp_to_run(interp)
yield
with open(w, 'w', encoding="utf-8") as spipe:
spipe.write('done')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
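    # Running in the main interpreter executes the source directly in a shared namespace;
    # otherwise the source is handed to interpreters.run_string().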
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
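    # One operation ('use', 'close', or 'force-close') applied to a channel end
    # from a particular interpreter ('main', 'same', 'other', or 'extra').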
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
class ChannelState(namedtuple('ChannelState', 'pending closed')):
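    # Expected channel state: number of pending (sent but not yet received) items and a closed flag.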
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
def run_action(cid, action, end, state, *, hideclosed=True):
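    # Apply `action` to channel `cid` and return the expected resulting ChannelState,
    # tolerating ChannelClosedError when the tracked state says the channel is already closed.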
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
            raise AssertionError('expected ChannelClosedError')  # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_strs(self):
self._assert_values(['hello world', '你好世界', ''])
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
@unittest.skip('Fails on FreeBSD')
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.is_running(-1)
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
id = interpreters.InterpreterID(Int(), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters.InterpreterID, object())
self.assertRaises(TypeError, interpreters.InterpreterID, 10.0)
self.assertRaises(TypeError, interpreters.InterpreterID, '10')
self.assertRaises(TypeError, interpreters.InterpreterID, b'10')
self.assertRaises(ValueError, interpreters.InterpreterID, -1)
self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64)
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertTrue(int(id1) == id1)
self.assertTrue(id1 == float(int(id1)))
self.assertTrue(float(int(id1)) == id1)
self.assertFalse(id1 == float(int(id1)) + 0.1)
self.assertFalse(id1 == str(int(id1)))
self.assertFalse(id1 == 2**1000)
self.assertFalse(id1 == float('inf'))
self.assertFalse(id1 == 'spam')
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
self.assertTrue(interpreters.is_running(interp),
msg=f"Interp {interp} should be running before destruction.")
with self.assertRaises(RuntimeError,
msg=f"Should not be able to destroy interp {interp} while it's still running."):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
class RunStringTests(TestBase):
def setUp(self):
super().setUp()
self.id = interpreters.create()
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
subinterp = interpreters.create(isolated=False)
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+', encoding="utf-8") as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w', encoding='utf-8') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
cid = interpreters._channel_id(Int(), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters._channel_id, object())
self.assertRaises(TypeError, interpreters._channel_id, 10.0)
self.assertRaises(TypeError, interpreters._channel_id, '10')
self.assertRaises(TypeError, interpreters._channel_id, b'10')
self.assertRaises(ValueError, interpreters._channel_id, -1)
self.assertRaises(OverflowError, interpreters._channel_id, 2**64)
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertTrue(int(cid1) == cid1)
self.assertTrue(cid1 == float(int(cid1)))
self.assertTrue(float(int(cid1)) == cid1)
self.assertFalse(cid1 == float(int(cid1)) + 0.1)
self.assertFalse(cid1 == str(int(cid1)))
self.assertFalse(cid1 == 2**1000)
self.assertFalse(cid1 == float('inf'))
self.assertFalse(cid1 == 'spam')
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
def test_channel_list_interpreters_none(self):
"""Test listing interpreters for a channel with no associations."""
# Test for channel with no associated interpreters.
cid = interpreters.channel_create()
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [])
self.assertEqual(recv_interps, [])
def test_channel_list_interpreters_basic(self):
"""Test basic listing channel interpreters."""
interp0 = interpreters.get_main()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
# Test for a channel that has one end associated to an interpreter.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [])
interp1 = interpreters.create()
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
        # Test for a channel that has both ends associated with an interpreter.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [interp1])
def test_channel_list_interpreters_multiple(self):
"""Test listing interpreters for a channel with many associations."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
interp3 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, "send")
"""))
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
_run_output(interp3, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(set(send_interps), {interp0, interp1})
self.assertEqual(set(recv_interps), {interp2, interp3})
def test_channel_list_interpreters_destroyed(self):
"""Test listing channel interpreters with a destroyed interpreter."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Should be one interpreter associated with each end.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [interp1])
interpreters.destroy(interp1)
# Destroyed interpreter should not be listed.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [])
def test_channel_list_interpreters_released(self):
"""Test listing channel interpreters with a released channel."""
# Set up one channel with main interpreter on the send end and two
# subinterpreters on the receive end.
interp0 = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "data")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
interpreters.channel_send(cid, "data")
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Check the setup.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 2)
# Release the main interpreter from the send end.
interpreters.channel_release(cid, send=True)
# Send end should have no associated interpreters.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 0)
self.assertEqual(len(recv_interps), 2)
# Release one of the subinterpreters from the receive end.
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
# Receive end should have the released interpreter removed.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 0)
self.assertEqual(recv_interps, [interp1])
def test_channel_list_interpreters_closed(self):
"""Test listing channel interpreters with a closed channel."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
# Put something in the channel so that it's not empty.
interpreters.channel_send(cid, "send")
# Check initial state.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 0)
# Force close the channel.
interpreters.channel_close(cid, force=True)
# Both ends should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=False)
def test_channel_list_interpreters_closed_send_end(self):
"""Test listing channel interpreters with a channel's send end closed."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
# Put something in the channel so that it's not empty.
interpreters.channel_send(cid, "send")
# Check initial state.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 0)
# Close the send end of the channel.
interpreters.channel_close(cid, send=True)
# Send end should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
# Receive end should not be closed (since channel is not empty).
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(recv_interps), 0)
# Close the receive end of the channel from a subinterpreter.
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
# Both ends should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=False)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert(obj == b'spam')
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_recv_default(self):
default = object()
cid = interpreters.channel_create()
obj1 = interpreters.channel_recv(cid, default)
interpreters.channel_send(cid, None)
interpreters.channel_send(cid, 1)
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'eggs')
obj2 = interpreters.channel_recv(cid, default)
obj3 = interpreters.channel_recv(cid, default)
obj4 = interpreters.channel_recv(cid)
obj5 = interpreters.channel_recv(cid, default)
obj6 = interpreters.channel_recv(cid, default)
self.assertIs(obj1, default)
self.assertIs(obj2, None)
self.assertEqual(obj3, 1)
self.assertEqual(obj4, b'spam')
self.assertEqual(obj5, b'eggs')
self.assertIs(obj6, default)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_channel_list_interpreters_invalid_channel(self):
cid = interpreters.channel_create()
# Test for invalid channel ID.
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_list_interpreters(1000, send=True)
interpreters.channel_close(cid)
# Test for a channel that has been closed.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
def test_channel_list_interpreters_invalid_args(self):
# Tests for invalid arguments passed to the API.
cid = interpreters.channel_create()
with self.assertRaises(TypeError):
interpreters.channel_list_interpreters(cid)
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
microtvm_api_server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import copy
import enum
import fcntl
import logging
import os
import os.path
import pathlib
import queue
import re
import select
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import serial
import serial.tools.list_ports
import yaml
from tvm.micro.project_api import server
_LOG = logging.getLogger(__name__)
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
BOARD_PROPERTIES = {
"qemu_x86": {
"board": "qemu_x86",
"model": "host",
},
"qemu_riscv32": {
"board": "qemu_riscv32",
"model": "host",
},
"qemu_riscv64": {
"board": "qemu_riscv64",
"model": "host",
},
"mps2_an521": {
"board": "mps2_an521",
"model": "mps2_an521",
},
"nrf5340dk_nrf5340_cpuapp": {
"board": "nrf5340dk_nrf5340_cpuapp",
"model": "nrf5340dk",
},
"stm32f746xx_disco": {
"board": "stm32f746xx_disco",
"model": "stm32f746xx",
},
"nucleo_f746zg": {
"board": "nucleo_f746zg",
"model": "stm32f746xx",
},
"nucleo_l4r5zi": {
"board": "nucleo_l4r5zi",
"model": "stm32l4r5zi",
},
"qemu_cortex_r5": {
"board": "qemu_cortex_r5",
"model": "zynq_mp_r5",
},
}
def check_call(cmd_args, *args, **kwargs):
cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})"
_LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
return subprocess.check_call(cmd_args, *args, **kwargs)
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
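# Illustrative note (not in the original file): CACHE_ENTRY_RE splits a CMakeCache.txt
# entry of the form "NAME:TYPE=VALUE".  For example, a hypothetical line
# "ZEPHYR_BASE:PATH=/home/user/zephyr" yields name="ZEPHYR_BASE", type="PATH",
# value="/home/user/zephyr", while a BOOL entry such as "FOO:BOOL=ON" is translated
# to the Python value True via CMAKE_BOOL_MAP.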
class CMakeCache(collections.abc.Mapping):
def __init__(self, path):
self._path = path
self._dict = None
def __iter__(self):
return iter(self._dict)
def __getitem__(self, key):
if self._dict is None:
self._dict = self._read_cmake_cache()
return self._dict[key]
def __len__(self):
return len(self._dict)
def _read_cmake_cache(self):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(self._path, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
"""Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
def _get_flash_runner():
flash_runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return _get_nrf_device_args(options)
if flash_runner == "openocd":
return _get_openocd_device_args(options)
raise BoardError(
f"Don't know how to find serial terminal for board {CMAKE_CACHE['BOARD']} with flash "
f"runner {flash_runner}"
)
# kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
BOARD_USB_FIND_KW = {
"nucleo_l4r5zi": {"idVendor": 0x0483, "idProduct": 0x374B},
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
}
def openocd_serial(options):
"""Find the serial port to use for a board with OpenOCD flash strategy."""
if "openocd_serial" in options:
return options["openocd_serial"]
import usb # pylint: disable=import-outside-toplevel
find_kw = BOARD_USB_FIND_KW[CMAKE_CACHE["BOARD"]]
boards = usb.core.find(find_all=True, **find_kw)
serials = []
for b in boards:
serials.append(b.serial_number)
if len(serials) == 0:
raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
serials.sort()
autodetected_openocd_serial = serials[0]
_LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
return autodetected_openocd_serial
def _get_openocd_device_args(options):
return ["--serial", openocd_serial(options)]
def _get_nrf_device_args(options):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if options["nrfjprog_snr"] is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
)
if str(options["nrfjprog_snr"]) not in boards:
raise BoardError(
f"nrfjprog_snr ({options['nrfjprog_snr']}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", options["nrfjprog_snr"]]
if not boards:
return []
return ["--snr", boards[0]]
PROJECT_TYPES = []
if IS_TEMPLATE:
for d in (API_SERVER_DIR / "src").iterdir():
if d.is_dir():
PROJECT_TYPES.append(d.name)
PROJECT_OPTIONS = [
server.ProjectOption(
"extra_files_tar",
help="If given, during generate_project, uncompress the tarball at this path into the project dir.",
),
server.ProjectOption(
"gdbserver_port", help=("If given, port number to use when running the local gdbserver.")
),
server.ProjectOption(
"nrfjprog_snr",
help=("When used with nRF targets, serial # of the attached board to use, from nrfjprog."),
),
server.ProjectOption(
"openocd_serial",
help=("When used with OpenOCD targets, serial # of the attached board to use."),
),
server.ProjectOption(
"project_type",
help="Type of project to generate.",
choices=tuple(PROJECT_TYPES),
),
server.ProjectOption("verbose", help="Run build with verbose output.", choices=(True, False)),
server.ProjectOption(
"west_cmd",
help=(
"Path to the west tool. If given, supersedes both the zephyr_base "
"option and ZEPHYR_BASE environment variable."
),
),
server.ProjectOption("zephyr_base", help="Path to the zephyr base directory."),
server.ProjectOption(
"zephyr_board",
choices=list(BOARD_PROPERTIES),
help="Name of the Zephyr board to build for.",
),
server.ProjectOption(
"zephyr_model",
choices=[board["model"] for _, board in BOARD_PROPERTIES.items()],
help="Name of the model for each Zephyr board.",
),
]
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
    # Maps each extra line added to prj.conf to the zephyr_board values that need it.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
with open(project_dir / "prj.conf", "w") as f:
f.write(
"# For UART used from main().\n"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
if options["project_type"] == "host_driven":
f.write("# For RPC server C++ bindings.\n" "CONFIG_CPLUSPLUS=y\n" "\n")
f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
if self._has_fpu(options["zephyr_board"]):
f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
main_stack_size = None
if self._is_qemu(options) and options["project_type"] == "host_driven":
main_stack_size = 1536
# Set main stack size, if needed.
if main_stack_size is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={main_stack_size}\n")
f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
f.write("\n# Extra prj.conf directives\n")
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if options["zephyr_board"] in board_list:
f.write(f"{line}\n")
f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common common",
"aot_demo": "memory microtvm_rpc_common common",
}
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
project_dir = pathlib.Path(project_dir)
# Make project directory.
project_dir.mkdir()
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
        # Extract Model Library Format tarball into <project_dir>/model.
extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(options):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
# Populate CRT.
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate Makefile.
with open(API_SERVER_DIR / "CMakeLists.txt.template", "r") as cmake_template_f:
with open(project_dir / "CMakeLists.txt", "w") as cmake_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
cmake_f.write(line)
self._create_prj_conf(project_dir, options)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
# Populate src/
src_dir = project_dir / "src"
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
# Populate extra_files
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
BUILD_DIR.mkdir()
cmake_args = ["cmake", ".."]
if options.get("verbose"):
cmake_args.append("-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE")
if options.get("zephyr_base"):
cmake_args.append(f"-DZEPHYR_BASE:STRING={options['zephyr_base']}")
if options.get("west_cmd"):
cmake_args.append(f"-DWEST={options['west_cmd']}")
cmake_args.append(f"-DBOARD:STRING={options['zephyr_board']}")
check_call(cmake_args, cwd=BUILD_DIR)
args = ["make", "-j2"]
if options.get("verbose"):
args.append("VERBOSE=1")
check_call(args, cwd=BUILD_DIR)
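    # For reference (illustrative, assuming a qemu_x86 build without the optional
    # flags), build() above amounts to roughly:
    #
    #   cd build && cmake .. -DBOARD:STRING=qemu_x86 && make -j2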
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ("mps2_an521",)
@classmethod
def _is_qemu(cls, options):
return (
"qemu" in options["zephyr_board"]
or options["zephyr_board"] in cls._KNOWN_QEMU_ZEPHYR_BOARDS
)
_KNOWN_FPU_ZEPHYR_BOARDS = (
"nucleo_f746zg",
"nucleo_l4r5zi",
"nrf5340dk_nrf5340_cpuapp",
"qemu_cortex_r5",
"qemu_riscv32",
"qemu_riscv64",
"qemu_x86",
"stm32f746g_disco",
)
@classmethod
def _has_fpu(cls, zephyr_board):
return zephyr_board in cls._KNOWN_FPU_ZEPHYR_BOARDS
def flash(self, options):
if self._is_qemu(options):
return # NOTE: qemu requires no flash step--it is launched from open_transport.
zephyr_board = options["zephyr_board"]
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["make", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
if self._is_qemu(options):
transport = ZephyrQemuTransport(options)
else:
transport = ZephyrSerialTransport(options)
to_return = transport.open()
self._transport = transport
atexit.register(lambda: self.close_transport())
return to_return
def close_transport(self):
if self._transport is not None:
self._transport.close()
self._transport = None
def read_transport(self, n, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
    assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
@classmethod
def _lookup_baud_rate(cls, options):
zephyr_base = options.get("zephyr_base", os.environ["ZEPHYR_BASE"])
sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
try:
import dtlib # pylint: disable=import-outside-toplevel
finally:
sys.path.pop(0)
dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
return uart_baud
@classmethod
def _find_nrf_serial_port(cls, options):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
)
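        # The parsing below assumes `nrfjprog --com` prints one line per port of the
        # form "<serial-number> <device-path> <VCOMn>" (e.g. "123456789 /dev/ttyACM1 VCOM2"
        # -- illustrative values only) and returns the device path mapped to VCOM2.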
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
return ports_by_vcom["VCOM2"]
@classmethod
def _find_openocd_serial_port(cls, options):
serial_number = openocd_serial(options)
ports = [p for p in serial.tools.list_ports.grep(serial_number)]
if len(ports) != 1:
raise Exception(
f"_find_openocd_serial_port: expected 1 port to match {serial_number}, "
f"found: {ports!r}"
)
return ports[0].device
@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(options)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)
raise FlashRunnerNotSupported(
f"Don't know how to deduce serial port for flash runner {flash_runner}"
)
def __init__(self, options):
self._options = options
self._port = None
def open(self):
port_path = self._find_serial_port(self._options)
self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
self._port.close()
self._port = None
def read(self, n, timeout_sec):
self._port.timeout = timeout_sec
to_return = self._port.read(n)
if not to_return:
raise server.IoTimeoutError()
return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
QEMU_STARTED = "qemu_started"
MAKE_FAILED = "make_failed"
EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
def __init__(self, options):
self.options = options
self.proc = None
self.pipe_dir = None
self.read_fd = None
self.write_fd = None
        self._queue = queue.Queue()
        # kwargs forwarded to the `make run` subprocess in open(); initialized here so
        # that the gdbserver_port branch there does not fail with AttributeError.
        self.kwargs = {}
def open(self):
self.pipe_dir = pathlib.Path(tempfile.mkdtemp())
self.pipe = self.pipe_dir / "fifo"
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
if "gdbserver_port" in self.options:
if "env" in self.kwargs:
self.kwargs["env"] = copy.copy(self.kwargs["env"])
else:
self.kwargs["env"] = os.environ.copy()
self.kwargs["env"]["TVM_QEMU_GDBSERVER_PORT"] = str(self.options["gdbserver_port"])
        self.proc = subprocess.Popen(
            ["make", "run", f"QEMU_PIPE={self.pipe}"],
            cwd=BUILD_DIR,
            stdout=subprocess.PIPE,
            **self.kwargs,
        )
self._wait_for_qemu()
        # NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
        # limitation on Linux. Without this, non-blocking I/O can't use timeouts, because named
        # FIFOs are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
while to_write:
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
to_write = to_write[num_written:]
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
if __name__ == "__main__":
server.main(Handler())
|
socketserverhandler.py
|
import socket as pythonsocket
from threading import Thread
from time import sleep
import datetime
import pickle
import database
import reddit
import string
import random
import settings
socket = pythonsocket.socket(pythonsocket.AF_INET, pythonsocket.SOCK_STREAM)
def startServer():
database.beginDataBaseConnection()
database.initDatabase()
server_address = (settings.server_location, int(settings.server_port))
print('Starting server on %s port %s' % server_address)
socket.setsockopt(pythonsocket.SOL_SOCKET, pythonsocket.SO_REUSEADDR, 1)
socket.settimeout(None)
socket.bind(server_address)
socket.listen(5)
socket.settimeout(None)
thread = Thread(target=waitConnect)
thread.start()
servertick = Thread(target=serverTick)
servertick.start()
clients = []
class Client():
def __init__(self, connection, address, authorized):
self.connection = connection
self.address = address
self.authorized = authorized
self.key = None
self.username = None
self.editingScript = None
self.disconnect = False
self.lastPing = datetime.datetime.now()
self.scriptsComplete = []
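# Informal sketch of the wire protocol, inferred from clientTick() below (added for
# clarity, not part of the original source): every message is a pickled tuple prefixed
# with a 10-byte length header.  Before login the client sends
# ("login-attempt", username, password); once authorized, requests take the form
# (session_key, command, *args), e.g. (key, "request-scripts", amount, "ups") or
# (key, "PING").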
def waitConnect():
print("Server wait client thread started")
while True:
sleep(0.1)
connection, address = socket.accept()
print("%s Client connected on %s" % (datetime.datetime.now(), address))
client = Client(connection, address, False)
clients.append(client)
clientthread = Thread(target=clientTick, args=[clients[len(clients) - 1]])
clientthread.start()
def getAllClientConnections():
return [client.connection for client in clients]
def sendToAllClients(payload):
for client_con in getAllClientConnections():
try:
sendToClient(client_con, payload)
except Exception:
print("couldn't send to connection %s" % client_con)
def clientTick(client):
print("Server tick thread started for client")
HEADERSIZE = 10
while True:
if client.disconnect:
print("%s SERVER user %s disconnected" % (datetime.datetime.now(), repr(client.username)))
break
full_msg = b''
new_msg = True
while True:
try:
client_connection = client.connection
buf = client_connection.recv(2048)
if new_msg:
try:
msglen = int(buf[:HEADERSIZE])
except ValueError:
print("client disconnect error")
# happens when client disconnects
break
new_msg = False
full_msg += buf
except ConnectionResetError:
print("%s SERVER user %s connecton reset error" % (datetime.datetime.now(), repr(client.username)))
break
download_size = len(full_msg) - HEADERSIZE
if download_size == msglen:
if download_size > 100000:
print(
"%s SERVER received large message (%s)" % (
datetime.datetime.now(), str(download_size / 1000000) + "MB"))
try:
incomingdata = pickle.loads(full_msg[HEADERSIZE:])
except EOFError:
print("%s SERVER user %s disconnected" % (datetime.datetime.now(), repr(client.username)))
break
new_msg = True
full_msg = b""
if not client.authorized:
if "login-attempt" == incomingdata[0]:
print("%s SERVER user %s login attempt" % (datetime.datetime.now(), repr(incomingdata[1])))
username = incomingdata[1]
password = incomingdata[2]
login = (database.login(username, password))
online_users = database.getOnlineUsers()
if username in online_users:
print("%s SERVER user %s already logged in" % (
datetime.datetime.now(), repr(incomingdata[1])))
sendToClient(client_connection, ("login-success", False, None))
else:
if login:
key = generateKey()
client.key = key
client.username = username
sendToClient(client_connection, ("login-success", True, key))
client.authorized = True
print("%s SERVER user %s logged in" % (datetime.datetime.now(), repr(incomingdata[1])))
database.updateUserStatus(username, "ONLINE")
else:
sendToClient(client_connection, ("login-success", False, None))
print("%s SERVER user %s wrong password" % (
datetime.datetime.now(), repr(incomingdata[1])))
else:
if "request-scripts" == incomingdata[1]:
print("%s SERVER user %s request scripts" % (datetime.datetime.now(), repr(client.username)))
if incomingdata[0] == client.key:
print("%s SERVER sending scripts to user %s" % (
datetime.datetime.now(), repr(client.username)))
amount = incomingdata[2]
filter = incomingdata[3]
if filter == "ups":
data = database.getScripts(amount, "ups")
sendToClient(client_connection, ("scripts-return", data, settings.music_types))
elif filter == "latest posts":
data = database.getScripts(amount, "timecreated")
sendToClient(client_connection, ("scripts-return", data, settings.music_types))
elif filter == "recently added":
data = database.getScripts(amount, "timegathered")
sendToClient(client_connection, ("scripts-return", data, settings.music_types))
elif filter == "comments":
data = database.getScripts(amount, "num_comments")
sendToClient(client_connection, ("scripts-return", data, settings.music_types))
pass
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "edit-script" == incomingdata[1]:
scriptno = incomingdata[2]
print("%s SERVER user %s request to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
if incomingdata[0] == client.key:
script_status = database.getScriptStatus(scriptno)
if script_status == "RAW":
print("%s SERVER allowing user %s to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
client.editingScript = scriptno
database.updateScriptStatus("EDITING", client.username, scriptno)
sendToClient(client.connection, ('edit-script-success', True, scriptno))
sendToAllClients(('script-status-update', scriptno, "EDITING", client.username))
print("%s SERVER sending all clients (%s) status update for %s" % (
datetime.datetime.now(), len(getAllClientConnections()), scriptno))
elif script_status == "EDITING":
print("%s SERVER refusing user %s to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
sendToClient(client.connection, ('edit-script-success', False, scriptno))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "upload-video" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
video_generator_payload = incomingdata[3]
script_status = database.getScriptStatus(scriptno)
if script_status == "EDITING":
if scriptno == client.editingScript:
print("%s SERVER allowing user %s to upload script number %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
if database.uploadVid(video_generator_payload, scriptno):
database.updateScriptStatus("COMPLETE", client.username, scriptno)
sendToClient(client_connection, ('script-upload-success', True, scriptno))
client.scriptsComplete.append(scriptno)
client.editingScript = None
else:
sendToClient(client_connection, ('script-upload-success', False, scriptno))
sendToAllClients(('script-status-update', scriptno, "COMPLETE", client.username))
else:
print(
"%s SERVER user %s script number %s does not match what client is editing %s" % (
datetime.datetime.now(), repr(client.username), scriptno,
client.editingScript))
else:
print("%s SERVER user %s script status is %s" % (
datetime.datetime.now(), repr(client.username), script_status))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "quit-editing" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
if client.editingScript == scriptno:
database.updateScriptStatus("RAW", None, scriptno)
print("%s SERVER user %s quit editing %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
sendToAllClients(('script-status-update', scriptno, "RAW", None))
client.editingScript = None
else:
print("%s SERVER user %s not editing script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "flag-scripts" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
flagtype = incomingdata[3]
database.updateScriptStatus(flagtype, client.username, scriptno)
print("%s SERVER user %s flagging script %s as %s" % (
datetime.datetime.now(), repr(client.username), scriptno, flagtype))
sendToAllClients(('script-status-update', scriptno, flagtype, client.username))
client.editingScript = None
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "add-script" == incomingdata[1]:
if incomingdata[0] == client.key:
url = incomingdata[2]
try:
post = reddit.getPostByUrl(url)
if post is not None:
all_scripts = database.getScriptIds()
scriptIds = [scriptid[1] for scriptid in all_scripts]
print("Got script ids")
if post.submission_id in scriptIds:
print("Found script with same id")
database.updateScriptStatusById("RAW", client.username, post.submission_id)
print("Set it to raw.")
database.updateSubmission(post)
print("Updated submission")
print("%s SERVER user %s reset script %s" % (
datetime.datetime.now(), repr(client.username), post.submission_id))
sendToClient(client_connection,
('add-script-success', True, "Reset script"))
else:
print("%s SERVER user %s added script %s" % (
datetime.datetime.now(), repr(client.username), post.submission_id))
database.addSubmission(post)
sendToClient(client_connection,
('add-script-success', True, "Successfully added script"))
else:
print("%s SERVER user %s attempted to add script that already exists" % (
datetime.datetime.now(), repr(client.username)))
sendToClient(client_connection,
('add-script-success', False, "Error occured with url."))
except Exception as e:
print("%s SERVER user %s error attempting to add script %s" % (
datetime.datetime.now(), repr(client.username), url))
print(e)
sendToClient(client_connection,
('add-script-success', False, "An error occured trying to add the script"))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "PING" == incomingdata[1]:
if incomingdata[0] == client.key:
client.lastPing = datetime.datetime.now()
print("%s SERVER sending PONG to %s" % (datetime.datetime.now(), repr(client.username)))
sendToClient(client.connection, ('PONG',))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
                if (datetime.datetime.now() - client.lastPing).total_seconds() > 120:
print("%s SERVER no PING from %s in 2 minutes. Disconnecting" % (
datetime.datetime.now(), repr(client.username)))
client.disconnect = True
print("%s SERVER Thread shutting down" % datetime.datetime.now())
client.disconnect = True
break
def sendToClient(client_connection, payloadattachment):
payload_attach = pickle.dumps(payloadattachment)
HEADERSIZE = 10
payload = bytes(f"{len(payload_attach):<{HEADERSIZE}}", 'utf-8') + payload_attach
client_connection.sendall(payload)
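# Framing sketch (illustrative): sendToClient() prefixes the pickled payload with its
# byte length, left-justified in a 10-character ASCII header.  A 42-byte pickle is
# therefore sent as b"42        " + <42 pickle bytes>, and clientTick() keeps calling
# recv() until it has accumulated header + msglen bytes.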
def handleCompletedScripts():
while True:
pass
def serverTick():
global clients
while True:
sleep(0.1)
scriptsbeingedited = database.getScriptEditInformation() # gets information of scripts with EDITING status
        scriptsbeingeditedby = [editedby[2] for editedby in scriptsbeingedited]  # usernames currently editing a script
online_users = database.getOnlineUsers()
clientIndexToRemove = []
if clients:
for i, client in enumerate(clients):
                if client.username in scriptsbeingeditedby:
                    indexOfScript = scriptsbeingeditedby.index(client.username)
scriptno = scriptsbeingedited[indexOfScript][0]
# set script client was editing to raw
if not client.editingScript == scriptno and scriptno not in client.scriptsComplete:
print("%s SERVER setting status of script %s to RAW because client is not editing it" % (
datetime.datetime.now(), scriptno))
database.updateScriptStatus("RAW", None, scriptno)
for client_con in getAllClientConnections():
sendToClient(client_con, ('script-status-update', scriptno, "RAW", None))
if client.disconnect: # if client disconnects set script to raw
clientIndexToRemove.append(i)
else:
if scriptsbeingedited:
                    for script in scriptsbeingedited:
                        database.updateScriptStatus("RAW", None, script[0])
                        for client_con in getAllClientConnections():
                            sendToClient(client_con, ('script-status-update', script[0], "RAW", None))
                    print("%s SERVER setting status of all scripts to RAW as there are no clients." % (
                        datetime.datetime.now()))
if online_users:
for user in online_users:
database.updateUserStatus(user, None)
print("%s SERVER removing online status for %s as there are no clients" % (
datetime.datetime.now(), user))
if clientIndexToRemove:
for index in clientIndexToRemove:
print("deleted clients")
try:
if clients[index].username is not None:
database.updateUserStatus(clients[index].username, None)
for client in clients:
if not client.disconnect:
sendToClient(client.connection,
('script-status-update', clients[index].editingScript, "RAW", None))
except IndexError:
pass
try:
new_clients = []
for i in range(len(clients)):
if not clients[index] == clients[i]:
new_clients.append(clients[i])
clients = new_clients
except IndexError:
print("could not update client list")
if scriptsbeingedited:
pass
def generateKey():
"""Generate a random string of letters, digits and special characters """
password_characters = string.ascii_letters + string.digits + string.punctuation
return ''.join(random.choice(password_characters) for i in range(10))
|
TRIOU_Extension.py
|
myDebug = 1
import os, signal, time
from threading import Thread, Lock
import re
from xsalome import corba2python
import SALOME
import SALOMEDS
import SALOME__POA
import TRIOU_CORBA
import TRIOU_CORBA__POA
# ------------------------------------------
import warnings
if not myDebug:
warnings.filterwarnings("ignore", "", RuntimeWarning)
# ------------------------------------------
__DEFAULT_DELAY__ = 0.1 # default solver delay
# --- CORBA.ORB instance ---
from omniORB import CORBA
myORB = CORBA.ORB_init([''], CORBA.ORB_ID)
# --- Life Cycle CORBA ---
import LifeCycleCORBA
myLCC = LifeCycleCORBA.LifeCycleCORBA(myORB)
# -----------------------------
# Find or load TRIOU engine
# -----------------------------
def getEngine():
engine = myLCC.FindOrLoadComponent( "FactoryServerPy", "TRIOU" )
return engine
# -----------------------------
# Find or load VISU engine
# -----------------------------
def getVISU():
import VISU
aVISU = myLCC.FindOrLoadComponent( "FactoryServer", "VISU" )
return aVISU
# ------------------------------------------
class Extension(TRIOU_CORBA__POA.Extension):
def __init__(self):
"""
Constructor
"""
self._pid = 0
self._lock = Lock()
self._SolverLock = Lock()
self._paused = 0
self._PauseLock = Lock()
self._delay = __DEFAULT_DELAY__
self._workDirCreated = 0
self._workDir = ""
self._dataFile = ""
self._solverPath = ""
self._study = None
self._status = TRIOU_CORBA.OK_STATUS
pass
def StartSolver(self, theStudy, theInputFile, theSolverPath, theWorkingDir):
"""
Starts TRIOU solver:
<theStudy> - a SALOMEDS Study reference
<theInputFile> - input data file (*.data), if it is empty,
the input data file is generated from the study contents
(in this case <theStudy> should not be None)
<theSolverPath> - the path to the TRIOU solver, if it is empty, the default one is used.
<theWorkingDir> - the working directory, if it is empty, a temporary one is created.
        Returns 1 on success and 0 on failure
"""
self._status = TRIOU_CORBA.FAILED_TO_RUN
# check if solver is already running
if self._pid != 0:
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* Solver is already started!")
print("*******************************************************")
return 0
# study
self._study = theStudy
# check solver binary path
if not theSolverPath:
theSolverPath = "TRUST_mpich_opt_st"
if "TRIOU_ROOT_DIR" in os.environ:
aDir = os.getenv( "TRIOU_ROOT_DIR" ) + "/bin/"
if os.path.exists( aDir ):
theSolverPath = aDir + theSolverPath
if not os.path.exists( theSolverPath ) or not os.path.isfile( theSolverPath ):
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* Cannot start solver: solver is not found!")
print("*******************************************************")
return 0
if myDebug:
print("*******************************************************")
print("* WARNING! No solver executable name is given.")
print("* Using default one:", theSolverPath)
print("*******************************************************")
self._solverPath = theSolverPath
try:
# check/create working directory
if theWorkingDir:
if not os.path.exists( theWorkingDir ) or not os.path.isdir( theWorkingDir ):
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* Cannot start solver: wrong working directory!")
print("*******************************************************")
return 0
else:
# create temporary directory
                # os.tempnam was removed in Python 3; tempfile.mkdtemp both names and creates the directory
                theWorkingDir = tempfile.mkdtemp( prefix="TRIOU", dir='/tmp' )
self._workDirCreated = 1
self._workDir = theWorkingDir
if myDebug: print("TRIOU_Extension.StartSolver: working directory : ", self._workDir)
self._dataFile = self._workDir + os.sep + "Result.data"
if theInputFile:
# if <theInputFile> is given, create link to it in the working directory
if not os.path.exists( theInputFile ):
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* Cannot start solver: wrong input file!")
print("*******************************************************")
self._ClearWorkingDir()
return 0
os.symlink( theInputFile, self._dataFile )
else:
# if <theInputFile> is not given, try to generate input data from the study
# first check if study is given
if self._study is None:
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* Cannot start solver: no study is given!")
print("*******************************************************")
self._ClearWorkingDir()
return 0
# then look for the TRIOU component
aComp = self._study.FindComponent( "TRIOU" )
if aComp is None:
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* Cannot start solver: TRIOU component is not found!")
print("*******************************************************")
self._ClearWorkingDir()
return 0
# collect only first level objects
anIter = self._study.NewChildIterator( aComp )
listclass = []
while anIter.More():
aSObj = anIter.Value()
anIter.Next()
if myDebug: print("TRIOU_Extension.StartSolver: SObject:", aSObj)
aObject = aSObj.GetObject()
if myDebug: print("TRIOU_Extension.StartSolver: CORBA Object:", aObject)
if aObject:
aPObj = corba2python( aObject )
if myDebug: print("TRIOU_Extension.StartSolver: Python Object:", aPObj)
if aPObj:
listclass.append( aPObj )
pass
pass
# export the study structure to the data file
from triou import write_file_data
if myDebug: print("TRIOU_Extension.StartSolver: Writing input file: ", self._dataFile)
write_file_data( self._dataFile, listclass )
# starting execution
self._SolverLock.acquire()
self._lock.acquire()
if self._paused:
self._PauseLock.release()
self._paused = 0
Thread( target=self._RunSolver, args=()).start()
self._lock.acquire()
self._lock.release()
except Exception as e:
if myDebug:
print("*******************************************************")
print("* TRIOU_Extension.StartSolver: ERROR!")
print("* An exception has bee caught!")
print("*", e)
print("*******************************************************")
self._ClearWorkingDir()
return 0
return 1
def _RunSolver(self):
"""
Performs the solver running in the thread loop (internal usage)
"""
# Starting the solver
aBaseName = os.path.basename( self._dataFile ).split( '.' )[ 0 ] # remove extension
aCmd = 'cd ' + self._workDir + '; exec ' + self._solverPath + ' ' + aBaseName + ' >& ' + self._workDir + os.sep + 'TRIOU.log'
self._pid = os.spawnlp( os.P_NOWAIT, '/bin/sh', 'sh', '-c', aCmd)
if myDebug: print('TRIOU_Extension._RunSolver: PROCESS STARTED -', self._pid)
self._lock.release()
self._status = TRIOU_CORBA.RUNNING
try:
while 1:
# Is the solver alive?
aPid, aStatus = os.waitpid(self._pid, os.WNOHANG)
if self._paused:
self._PauseLock.acquire()
self._PauseLock.release()
pass
if myDebug: print("TRIOU_Extension._RunSolver: sleep for ", self._delay, "seconds")
time.sleep( self._delay )
pass
pass
except Exception as e:
if myDebug: print("TRIOU_Extension._RunSolver: Exception - ", e)
pass
self._status = self._PublishData()
self._ClearWorkingDir()
self._lock.acquire()
# We are not interested in <pid> any longer
self._pid = 0
self._lock.release()
self._SolverLock.release()
pass
def _ClearWorkingDir(self):
"""
Clears/removes working directory (internal usage)
"""
if self._workDir:
if self._workDirCreated:
os.system( 'rm -rf ' + self._workDir )
pass
def _PublishData(self):
"""
Publishes the MED results if there are any in the study (internal usage)
"""
if not self._workDir:
return TRIOU_CORBA.FAILED_TO_PUBLISH
# look for MED files in the working directory
        aMedFileRE = re.compile( r".+\.med$" )
aListDir = os.listdir( self._workDir )
aListOfFiles = []
aFile = None
for aFileName in aListDir:
if re.match( aMedFileRE, aFileName ):
aFile = self._workDir + os.sep + aFileName
aListOfFiles.append( aFile )
if myDebug: print("TRIOU_Extension._PublishData: found MED file ", aFile)
pass
# export MED results to VISU if there are any
status = TRIOU_CORBA.OK_STATUS
errors = 0
if len( aListOfFiles ) > 0:
aVisuGen = getVISU()
if aVisuGen is not None:
if self._study is not None:
aVisuGen.SetCurrentStudy( self._study )
for aFile in aListOfFiles:
try:
aResult = aVisuGen.CopyAndImportFile( aFile )
except:
errors = errors + 1
if myDebug:
print("********************************************************")
print("* TRIOU_Extension._PublishData: ERROR!")
print("* Can't Import MED file", aFile)
print("********************************************************")
pass
else:
errors = errors + 1
else:
status = TRIOU_CORBA.OK_NO_VISU
if myDebug:
print("********************************************************")
print("* TRIOU_Extension._PublishData: ERROR!")
print("* Can't find VISU module")
print("********************************************************")
pass
else:
status = TRIOU_CORBA.OK_NO_MED_FILES
if myDebug:
print("********************************************************")
print("* TRIOU_Extension._PublishData: WARNING!")
print("* No result MED files is produced")
print("********************************************************")
if errors > 0:
status = TRIOU_CORBA.OK_WITH_ERRORS
return status
def StopSolver(self):
"""
Stops the solver execution
"""
if self._pid == 0:
return
self._lock.acquire()
if self._paused:
self._PauseLock.release()
pass
try:
os.kill(self._pid, signal.SIGKILL)
except:
pass
self._pid = 0
self._paused = 0
if myDebug: print('TRIOU_Extension.StopSolver: PROCESS KILLED!')
self._lock.release()
def PauseSolver(self):
"""
Pauses the solver execution
"""
if self._pid == 0 or self._paused == 1:
return
self._lock.acquire()
self._PauseLock.acquire()
try:
os.kill(self._pid, signal.SIGSTOP)
self._paused = 1
if myDebug: print('TRIOU_Extension.PauseSolver: PROCESS PAUSED!')
except:
self._PauseLock.release()
self._lock.release()
def ContinueSolver(self):
"""
Resumes the solver execution
"""
if self._pid == 0 or self._paused == 0:
return
self._lock.acquire()
try:
os.kill(self._pid, signal.SIGCONT)
self._paused = 0
if myDebug: print('TRIOU_Extension.ContinueSolver: PROCESS RESUMED!')
self._PauseLock.release()
except:
pass
self._lock.release()
def IsSolverRunning(self):
"""
Returns 1 if solver execution is in process
"""
return (self._pid != 0)
def IsSolverPaused(self):
"""
Returns 1 if solver execution is paused
"""
return self._paused
def GetStatus(self):
"""
Gets the status of the solver execution process
"""
return self._status
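# ------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module;
# the data file path below is a placeholder):
#   ext = Extension()
#   if ext.StartSolver(None, "/path/to/case.data", "", ""):
#       while ext.IsSolverRunning():
#           time.sleep(1.0)
#       print("Solver finished with status:", ext.GetStatus())
# ------------------------------------------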
|
myo_multithreading_examp.py
|
import multiprocessing
from pyomyo import Myo, emg_mode
# ------------ Myo Setup ---------------
q = multiprocessing.Queue()
def worker(q):
m = Myo(mode=emg_mode.FILTERED)
m.connect()
def add_to_queue(emg, movement):
q.put(emg)
m.add_emg_handler(add_to_queue)
def print_battery(bat):
print("Battery level:", bat)
m.add_battery_handler(print_battery)
# Orange logo and bar LEDs
m.set_leds([128, 0, 0], [128, 0, 0])
# Vibrate to know we connected okay
m.vibrate(1)
"""worker function"""
while True:
m.run()
print("Worker Stopped")
# -------- Main Program Loop -----------
if __name__ == "__main__":
p = multiprocessing.Process(target=worker, args=(q,))
p.start()
try:
while True:
while not(q.empty()):
emg = list(q.get())
print(emg)
except KeyboardInterrupt:
print("Quitting")
quit()
|
jobshandler.py
|
from queue import Queue
from threading import Thread
from ydl_server.logdb import JobsDB, Actions
queue = Queue()
thread = None
done = False
def start(dl_queue):
    global thread
    thread = Thread(target=worker, args=(dl_queue,))
    thread.start()
def put(obj):
    queue.put(obj)
def finish():
    global done
    done = True
def worker(dl_queue):
db = JobsDB(readonly=False)
while not done:
action, job = queue.get()
if action == Actions.PURGE_LOGS:
db.purge_jobs()
elif action == Actions.INSERT:
db.insert_job(job)
dl_queue.put(job)
elif action == Actions.UPDATE:
db.update_job(job)
elif action == Actions.RESUME:
db.update_job(job)
dl_queue.put(job)
elif action == Actions.SET_NAME:
job_id, name = job
db.set_job_name(job_id, name)
elif action == Actions.SET_LOG:
job_id, log = job
db.set_job_log(job_id, log)
elif action == Actions.SET_STATUS:
job_id, status = job
db.set_job_status(job_id, status)
queue.task_done()
def join():
if thread is not None:
return thread.join()
|
email.py
|
from threading import Thread
from .. import mail
from flask_mail import Message
from flask import current_app, render_template
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template_txt, template_html=None, **kwargs):
app = current_app._get_current_object()
msg = Message(subject)
    msg.sender = app.config['MAIL_USERNAME']
msg.recipients = [to]
if not template_html:
template_html = template_txt
msg.body = render_template(template_txt, **kwargs)
msg.html = render_template(template_html, **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
def send_confirm_email(to, user, token):
title = 'Simple Blog confirm user email'
template_txt = 'useraccounts/email/confirm.txt'
template_html = 'useraccounts/email/confirm.html'
return send_email(to, title, template_txt, template_html, user=user, token=token)
def send_reset_password_mail(to, user, token):
title = 'Simple Blog reset user password'
template_txt = 'useraccounts/email/reset_password.txt'
template_html = 'useraccounts/email/reset_password.html'
return send_email(to, title, template_txt, template_html, user=user, token=token)
|
tensorboard.py
|
"Provides convenient callbacks for Learners that write model images, metrics/losses, stats and histograms to Tensorboard"
from ..basic_train import Learner
from ..basic_data import DatasetType, DataBunch
from ..vision import Image
from ..vision.gan import GANLearner
from ..callbacks import LearnerCallback
from ..core import *
from ..torch_core import *
from threading import Thread, Event
from time import sleep
from queue import Queue
import statistics
import torchvision.utils as vutils
from abc import ABC, abstractmethod
#This is an optional dependency in fastai. Must install separately.
try: from tensorboardX import SummaryWriter
except: pass
__all__=['LearnerTensorboardWriter', 'GANTensorboardWriter', 'ImageGenTensorboardWriter']
#---Example usage (applies to any of the callbacks)---
# proj_id = 'Colorize'
# tboard_path = Path('data/tensorboard/' + proj_id)
# learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=tboard_path, name='GanLearner'))
class LearnerTensorboardWriter(LearnerCallback):
"Broadly useful callback for Learners that writes to Tensorboard. Writes model histograms, losses/metrics, and gradient stats."
def __init__(self, learn:Learner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500, stats_iters:int=100):
super().__init__(learn=learn)
self.base_dir,self.name,self.loss_iters,self.hist_iters,self.stats_iters = base_dir,name,loss_iters,hist_iters,stats_iters
log_dir = base_dir/name
self.tbwriter = SummaryWriter(log_dir=str(log_dir))
self.hist_writer = HistogramTBWriter()
self.stats_writer = ModelStatsTBWriter()
self.data = None
self.metrics_root = '/metrics/'
self._update_batches_if_needed()
def _get_new_batch(self, ds_type:DatasetType)->Collection[Tensor]:
"Retrieves new batch of DatasetType, and detaches it."
return self.learn.data.one_batch(ds_type=ds_type, detach=True, denorm=False, cpu=False)
def _update_batches_if_needed(self)->None:
"one_batch function is extremely slow with large datasets. This is caching the result as an optimization."
update_batches = self.data is not self.learn.data
if not update_batches: return
self.data = self.learn.data
self.trn_batch = self._get_new_batch(ds_type=DatasetType.Train)
self.val_batch = self._get_new_batch(ds_type=DatasetType.Valid)
def _write_model_stats(self, iteration:int)->None:
"Writes gradient statistics to Tensorboard."
self.stats_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
"Writes training loss to Tensorboard."
scalar_value = to_np(last_loss)
tag = self.metrics_root + 'train_loss'
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
def _write_weight_histograms(self, iteration:int)->None:
"Writes model weight histograms to Tensorboard."
self.hist_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
def _write_scalar(self, name:str, scalar_value, iteration:int)->None:
"Writes single scalar value to Tensorboard."
tag = self.metrics_root + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
#TODO: Relying on a specific hardcoded start_idx here isn't great. Is there a better solution?
def _write_metrics(self, iteration:int, last_metrics:MetricsList, start_idx:int=2)->None:
"Writes training metrics to Tensorboard."
recorder = self.learn.recorder
for i, name in enumerate(recorder.names[start_idx:]):
if len(last_metrics) < i+1: return
scalar_value = last_metrics[i]
self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration)
def on_batch_end(self, last_loss:Tensor, iteration:int, **kwargs)->None:
"Callback function that writes batch end appropriate data to Tensorboard."
if iteration == 0: return
self._update_batches_if_needed()
if iteration % self.loss_iters == 0: self._write_training_loss(iteration=iteration, last_loss=last_loss)
if iteration % self.hist_iters == 0: self._write_weight_histograms(iteration=iteration)
# Doing stuff here that requires gradient info, because they get zeroed out afterwards in training loop
def on_backward_end(self, iteration:int, **kwargs)->None:
"Callback function that writes backward end appropriate data to Tensorboard."
if iteration == 0: return
self._update_batches_if_needed()
if iteration % self.stats_iters == 0: self._write_model_stats(iteration=iteration)
def on_epoch_end(self, last_metrics:MetricsList, iteration:int, **kwargs)->None:
"Callback function that writes epoch end appropriate data to Tensorboard."
self._write_metrics(iteration=iteration, last_metrics=last_metrics)
# TODO: We're overriding almost everything here. Seems like a good idea to question that ("is a" vs "has a")
class GANTensorboardWriter(LearnerTensorboardWriter):
"Callback for GANLearners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
def __init__(self, learn:GANLearner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500,
stats_iters:int=100, visual_iters:int=100):
super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters, stats_iters=stats_iters)
self.visual_iters = visual_iters
self.img_gen_vis = ImageTBWriter()
self.gen_stats_updated = True
self.crit_stats_updated = True
def _write_weight_histograms(self, iteration:int)->None:
"Writes model weight histograms to Tensorboard."
generator, critic = self.learn.gan_trainer.generator, self.learn.gan_trainer.critic
self.hist_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='generator')
self.hist_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='critic')
def _write_gen_model_stats(self, iteration:int)->None:
"Writes gradient statistics for generator to Tensorboard."
generator = self.learn.gan_trainer.generator
self.stats_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='gen_model_stats')
self.gen_stats_updated = True
def _write_critic_model_stats(self, iteration:int)->None:
"Writes gradient statistics for critic to Tensorboard."
critic = self.learn.gan_trainer.critic
self.stats_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='crit_model_stats')
self.crit_stats_updated = True
def _write_model_stats(self, iteration:int)->None:
"Writes gradient statistics to Tensorboard."
# We don't want to write stats when model is not iterated on and hence has zeroed out gradients
gen_mode = self.learn.gan_trainer.gen_mode
if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration)
if not gen_mode and not self.crit_stats_updated: self._write_critic_model_stats(iteration=iteration)
def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
"Writes training loss to Tensorboard."
recorder = self.learn.gan_trainer.recorder
if len(recorder.losses) == 0: return
scalar_value = to_np((recorder.losses[-1:])[0])
tag = self.metrics_root + 'train_loss'
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
def _write_images(self, iteration:int)->None:
"Writes model generated, original and real images to Tensorboard."
trainer = self.learn.gan_trainer
#TODO: Switching gen_mode temporarily seems a bit hacky here. Certainly not a good side-effect. Is there a better way?
gen_mode = trainer.gen_mode
try:
trainer.switch(gen_mode=True)
self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch,
iteration=iteration, tbwriter=self.tbwriter)
finally: trainer.switch(gen_mode=gen_mode)
def on_batch_end(self, iteration:int, **kwargs)->None:
"Callback function that writes batch end appropriate data to Tensorboard."
super().on_batch_end(iteration=iteration, **kwargs)
if iteration == 0: return
if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
def on_backward_end(self, iteration:int, **kwargs)->None:
"Callback function that writes backward end appropriate data to Tensorboard."
if iteration == 0: return
self._update_batches_if_needed()
#TODO: This could perhaps be implemented as queues of requests instead but that seemed like overkill.
# But I'm not the biggest fan of maintaining these boolean flags either... Review pls.
if iteration % self.stats_iters == 0: self.gen_stats_updated, self.crit_stats_updated = False, False
if not (self.gen_stats_updated and self.crit_stats_updated): self._write_model_stats(iteration=iteration)
class ImageGenTensorboardWriter(LearnerTensorboardWriter):
"Callback for non-GAN image generating Learners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
def __init__(self, learn:Learner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500, stats_iters:int=100,
visual_iters:int=100):
super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters,
stats_iters=stats_iters)
self.visual_iters = visual_iters
self.img_gen_vis = ImageTBWriter()
def _write_images(self, iteration:int)->None:
"Writes model generated, original and real images to Tensorboard"
self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch, iteration=iteration,
tbwriter=self.tbwriter)
def on_batch_end(self, iteration:int, **kwargs)->None:
"Callback function that writes batch end appropriate data to Tensorboard."
super().on_batch_end(iteration=iteration, **kwargs)
if iteration == 0: return
if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
class TBWriteRequest(ABC):
"A request object for Tensorboard writes. Useful for queuing up and executing asynchronous writes."
def __init__(self, tbwriter: SummaryWriter, iteration:int):
super().__init__()
self.tbwriter = tbwriter
self.iteration = iteration
@abstractmethod
def write(self)->None: pass
# SummaryWriter writes tend to block quite a bit. This gets around that and greatly boosts performance.
# Not all tensorboard writes are using this- just the ones that take a long time. Note that the
# SummaryWriter does actually use a threadsafe consumer/producer design ultimately to write to Tensorboard,
# so writes done outside of this async loop should be fine.
class AsyncTBWriter():
"Callback for GANLearners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
def __init__(self):
super().__init__()
self.stop_request = Event()
self.queue = Queue()
self.thread = Thread(target=self._queue_processor, daemon=True)
self.thread.start()
def request_write(self, request: TBWriteRequest)->None:
"Queues up an asynchronous write request to Tensorboard."
if self.stop_request.isSet(): return
self.queue.put(request)
def _queue_processor(self)->None:
"Processes queued up write requests asynchronously to Tensorboard."
while not self.stop_request.isSet():
while not self.queue.empty():
if self.stop_request.isSet(): return
request = self.queue.get()
request.write()
sleep(0.2)
#Provided this to stop thread explicitly or by context management (with statement) but thread should end on its own
    # upon program exit, due to being a daemon. So using this is probably unnecessary.
def close(self)->None:
"Stops asynchronous request queue processing thread."
self.stop_request.set()
self.thread.join()
# Nothing to do, thread already started. Could start thread here to enforce use of context manager
    # (but that sounds like a pain and a bit unwieldy and unnecessary for actual usage)
def __enter__(self): pass
def __exit__(self, exc_type, exc_value, traceback): self.close()
asyncTBWriter = AsyncTBWriter()
class ModelImageSet():
"Convenience object that holds the original, real(target) and generated versions of a single image fed to a model."
@staticmethod
def get_list_from_model(learn:Learner, ds_type:DatasetType, batch:Tuple)->[]:
"Factory method to convert a batch of model images to a list of ModelImageSet."
image_sets = []
x,y = batch[0],batch[1]
preds = learn.pred_batch(ds_type=ds_type, batch=(x,y), reconstruct=True)
for orig_px, real_px, gen in zip(x,y,preds):
orig, real = Image(px=orig_px), Image(px=real_px)
image_set = ModelImageSet(orig=orig, real=real, gen=gen)
image_sets.append(image_set)
return image_sets
def __init__(self, orig:Image, real:Image, gen:Image): self.orig, self.real, self.gen = orig, real, gen
class HistogramTBRequest(TBWriteRequest):
"Request object for model histogram writes to Tensorboard."
def __init__(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str):
super().__init__(tbwriter=tbwriter, iteration=iteration)
self.params = [(name, values.clone().detach().cpu()) for (name, values) in model.named_parameters()]
self.name = name
def _write_histogram(self, param_name:str, values)->None:
"Writes single model histogram to Tensorboard."
tag = self.name + '/weights/' + param_name
self.tbwriter.add_histogram(tag=tag, values=values, global_step=self.iteration)
def write(self)->None:
"Writes model histograms to Tensorboard."
for param_name, values in self.params: self._write_histogram(param_name=param_name, values=values)
#If this isn't done async then this is sloooooow
class HistogramTBWriter():
"Writes model histograms to Tensorboard."
def __init__(self): super().__init__()
def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model')->None:
"Writes model histograms to Tensorboard."
request = HistogramTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name)
asyncTBWriter.request_write(request)
class ModelStatsTBRequest(TBWriteRequest):
"Request object for model gradient statistics writes to Tensorboard."
def __init__(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str):
super().__init__(tbwriter=tbwriter, iteration=iteration)
self.gradients = [x.grad.clone().detach().cpu() for x in model.parameters() if x.grad is not None]
self.name = name
def _add_gradient_scalar(self, name:str, scalar_value)->None:
"Writes a single scalar value for a gradient statistic to Tensorboard."
tag = self.name + '/gradients/' + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration)
def _write_avg_norm(self, norms:[])->None:
"Writes the average norm of the gradients to Tensorboard."
avg_norm = sum(norms)/len(self.gradients)
self._add_gradient_scalar('avg_norm', scalar_value=avg_norm)
def _write_median_norm(self, norms:[])->None:
"Writes the median norm of the gradients to Tensorboard."
median_norm = statistics.median(norms)
self._add_gradient_scalar('median_norm', scalar_value=median_norm)
def _write_max_norm(self, norms:[])->None:
"Writes the maximum norm of the gradients to Tensorboard."
max_norm = max(norms)
self._add_gradient_scalar('max_norm', scalar_value=max_norm)
def _write_min_norm(self, norms:[])->None:
"Writes the minimum norm of the gradients to Tensorboard."
min_norm = min(norms)
self._add_gradient_scalar('min_norm', scalar_value=min_norm)
def _write_num_zeros(self)->None:
"Writes the number of zeroes in the gradients to Tensorboard."
gradient_nps = [to_np(x.data) for x in self.gradients]
num_zeros = sum((np.asarray(x) == 0.0).sum() for x in gradient_nps)
self._add_gradient_scalar('num_zeros', scalar_value=num_zeros)
def _write_avg_gradient(self)->None:
"Writes the average of the gradients to Tensorboard."
avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients)
self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient)
def _write_median_gradient(self)->None:
"Writes the median of the gradients to Tensorboard."
median_gradient = statistics.median(x.data.median() for x in self.gradients)
self._add_gradient_scalar('median_gradient', scalar_value=median_gradient)
def _write_max_gradient(self)->None:
"Writes the maximum of the gradients to Tensorboard."
max_gradient = max(x.data.max() for x in self.gradients)
self._add_gradient_scalar('max_gradient', scalar_value=max_gradient)
def _write_min_gradient(self)->None:
"Writes the minimum of the gradients to Tensorboard."
min_gradient = min(x.data.min() for x in self.gradients)
self._add_gradient_scalar('min_gradient', scalar_value=min_gradient)
def write(self)->None:
"Writes model gradient statistics to Tensorboard."
if len(self.gradients) == 0: return
norms = [x.data.norm() for x in self.gradients]
self._write_avg_norm(norms=norms)
self._write_median_norm(norms=norms)
self._write_max_norm(norms=norms)
self._write_min_norm(norms=norms)
self._write_num_zeros()
self._write_avg_gradient()
self._write_median_gradient()
self._write_max_gradient()
self._write_min_gradient()
class ModelStatsTBWriter():
"Writes model gradient statistics to Tensorboard."
def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model_stats')->None:
"Writes model gradient statistics to Tensorboard."
request = ModelStatsTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name)
asyncTBWriter.request_write(request)
class ImageTBRequest(TBWriteRequest):
"Request object for model image output writes to Tensorboard."
def __init__(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType):
super().__init__(tbwriter=tbwriter, iteration=iteration)
self.image_sets = ModelImageSet.get_list_from_model(learn=learn, batch=batch, ds_type=ds_type)
self.ds_type = ds_type
def _write_images(self, name:str, images:[Tensor])->None:
"Writes list of images as tensors to Tensorboard."
tag = self.ds_type.name + ' ' + name
self.tbwriter.add_image(tag=tag, img_tensor=vutils.make_grid(images, normalize=True), global_step=self.iteration)
def _get_image_tensors(self)->([Tensor], [Tensor], [Tensor]):
"Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images."
orig_images, gen_images, real_images = [], [], []
for image_set in self.image_sets:
orig_images.append(image_set.orig.px)
gen_images.append(image_set.gen.px)
real_images.append(image_set.real.px)
return orig_images, gen_images, real_images
def write(self)->None:
"Writes original, generated and real(target) images to Tensorboard."
orig_images, gen_images, real_images = self._get_image_tensors()
self._write_images(name='orig images', images=orig_images)
self._write_images(name='gen images', images=gen_images)
self._write_images(name='real images', images=real_images)
#If this isn't done async then this is noticeably slower
class ImageTBWriter():
"Writes model image output to Tensorboard."
def __init__(self): super().__init__()
def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None:
"Writes training and validation batch images to Tensorboard."
self._write_for_dstype(learn=learn, batch=val_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Valid)
self._write_for_dstype(learn=learn, batch=trn_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Train)
def _write_for_dstype(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType)->None:
"Writes batch images of specified DatasetType to Tensorboard."
request = ImageTBRequest(learn=learn, batch=batch, iteration=iteration, tbwriter=tbwriter, ds_type=ds_type)
asyncTBWriter.request_write(request)
|
udp_test.py
|
import socket
from typing import NamedTuple
from multiprocessing import Process
import time
from urllib.parse import urlparse
COUNT = 200000
ADDR = 'udp://127.0.0.1:5557'  # only the host and port are used; this is a UDP socket test
class Combi(NamedTuple):
recv: int
send: int
should_multipart: bool
should_multipart_hand: bool
COMBINATIONS = {
'a': Combi(1, 1, 1, 1)
}
def worker(n):
c = COMBINATIONS[n]
addr = urlparse(ADDR)
UDP_IP = addr.hostname
UDP_PORT = addr.port
sock = socket.socket(
socket.AF_INET,
socket.SOCK_DGRAM
)
sock.bind((UDP_IP, UDP_PORT))
first = True
for task_nbr in range(COUNT):
data, addr = sock.recvfrom(512)
if len(data) and first:
print('\tXXX', len(data), addr)
first = False
# else:
# break
def main(n):
c = COMBINATIONS[n]
p = Process(target=worker, args=(n,))
p.start()
sock = socket.socket(
socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
addr = urlparse(ADDR)
UDP_IP = addr.hostname
UDP_PORT = addr.port
payload = b'123'*120
for num in range(COUNT):
sock.sendto(payload, (UDP_IP, UDP_PORT))
return p
if __name__ == "__main__":
for n in COMBINATIONS.keys():
print(n, COMBINATIONS[n])
start_time = time.time()
p = main(n)
end_time = time.time()
duration = end_time - start_time
msg_per_sec = COUNT / duration
print("\tDuration: %s" % duration)
print("\tMessages Per Second: %s" % msg_per_sec)
p.join()
|
fc2.py
|
import requests,re,subprocess,os,time,random,shutil,traceback,sys
from threading import Thread,Timer
from websocket import create_connection
import json
from datetime import datetime
from requests.utils import dict_from_cookiejar
import http.cookiejar as cj
from streamlink import Streamlink
from livestreamer import Livestreamer
if not os.path.exists('/root/b/d/fc2'):
os.makedirs('/root/b/d/fc2')
os.system('cd /root/u;bash fct.sh')
trytimes = 1  # input('retry count')
threads = 1  # input('thread count')
users = []
recording = []
class FC2():
url_login = 'https://secure.id.fc2.com/index.php?mode=login&switch_language=en'
url_member_api = 'https://live.fc2.com/api/memberApi.php'
url_server = 'https://live.fc2.com/api/getControlServer.php'
count = 0
count_ping = 0
host_data = ''
channel_data=''
profile_data=''
host_found = False
def __init__(self,userid,session):
self.user_id = userid
self.thread=None
self.session=session
self.url = 'https://live.fc2.com/'+str(userid)+'/'
self.sameid = 1
self.ex = 1
self.end = False
def login(self,relogin = 0):
if os.path.exists('cookies.txt') and not relogin:
self.session.cookies = cj.LWPCookieJar(filename='cookies.txt')
self.session.cookies.load(filename='cookies.txt', ignore_discard=True)
else:
self.session.cookies = cj.LWPCookieJar()
self.session.get(self.url)
data = {
'email':'48161925@qq.com',
'pass':'aa678678',
'done':'livechat',
'keep_login': 1
}
self.session.post(self.url_login,data=data)
self.session.cookies.save(filename='cookies.txt', ignore_discard=True, ignore_expires=True)
#cookies_list = self.save_cookies()
return 1#self.cmp_cookies_list(cookies_list)
def cmp_cookies_list(self,cookies_list):
required_cookies = [
'FCSID', 'fcu', 'fgcv', 'glgd_val',
'login_status', 'PHPSESSID', 'secure_check_fc2',
]
c_count = 0
for c in required_cookies:
if c in cookies_list:
c_count += 1
return (c_count == len(required_cookies))
def get_version(self, user_id):
data = {
'user': 1,
'channel': 1,
'profile': 1,
'streamid': int(user_id)
}
sys.stdout.write('\r\033[K'+str(user_id)+' get version')
res = self.session.post(self.url_member_api, data=data,timeout=10)
#print('\r\033[K',res.text)
#time.sleep(100)
try:
res_data = res.json()
except Exception as e:
print (e)
return False
self.profile_data = res_data['data']['profile_data']
channel_data = self.channel_data = res_data['data']['channel_data']
user_data = res_data['data']['user_data']
if (channel_data['login_only'] != 0 and user_data['is_login'] != 1):
sys.stdout.write(f'\r\033[K{self.user_id}A login is required for this stream.')
return 'login'
if channel_data['fee'] != 0:
sys.stdout.write('\rOnly streams without a fee are supported.'+str(self.user_id))
return False
version = channel_data['version']
if (version):
if user_data['is_login']:
sys.stdout.write('\rLogged in as {0}'.format(user_data['name']))
#print(channel_data['channelid'],' Found version: {0}'.format(version))
return version
def get_ws_url(self, user_id, version):
sys.stdout.write('\r_get_ws_url ...')
#orz = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJvcnoiOiJkYjQzNzVmYTM4NmY1NDdiOTI5OGMxNTE0NjJkNmFjMDZjZGIyMDQ2In0.ofWxZb9_CNUbpOsi3OCfj_0cMEaUO7S7VLWIIL0vO4s'
data = {
'channel_id': user_id,
'channel_version': version,
'client_type': 'pc',
'client_app': 'browser_hls',
'client_version':'1.6.3%0A+%5B1%5D',
'orz':'',
'mode':'play'
}
res = self.session.post(self.url_server, data=data)
w_data = res.json()
if w_data['status'] == 11:
sys.stdout.write('\rThe broadcaster is currently not available')
return None
ws_url = '{0}?control_token={1}'.format(
w_data['url'], w_data['control_token'])
#print('WS URL: {0}'.format(ws_url))
return ws_url
def payload_msg(self, name):
''' Format the WebSocket message '''
self.count_ping += 1
payload = json.dumps(
{
'name': str(name),
'arguments': {},
'id': int(self.count_ping)
}
)
return payload
def get_ws_data(self, ws_url):
ws = create_connection(ws_url)
ws.send(self.payload_msg('get_hls_information'))
#ws.send(self.payload_msg('get_media_server_information'))
def ws_ping():
''' ping the WebSocket '''
if ws.connected is True:
try:
t1 = Timer(30.0, ws_ping)
t1.daemon = True
t1.start()
ws.send(self.payload_msg('heartbeat'))
except:
pass
def ws_recv():
''' print WebSocket messages '''
while True:
self.count += 1
try:
rdata = ws.recv()
data = json.loads(rdata)
except:
print('\r\033[K',rdata)
break
#time_utc = datetime.utcnow().strftime('%H:%M:%S UTC')
#if data['name'] not in ['comment', 'ng_commentq',
# 'user_count', 'ng_comment']:
# print('{0} - {1} - {2}'.format(
# time_utc, self.count, data['name']))
if self.end:
break
if (data['name'] == '_response_'
and data['arguments'].get('playlists')):
sys.stdout.write('\rFound host data')
self.host_data = data
self.host_found = True
elif data['name'] == 'media_connection':
sys.stdout.write('\rsuccessfully opened stream')
elif data['name'] == 'control_disconnection':
if self.count <= 30:
                        # A points-restricted (paid) program is being broadcast
self.count = 30
break
if data.get('arguments').get('code') == 4512:
sys.stdout.write('\rDisconnected from Server')
break
elif data['name'] == 'publish_stop':
print('\r\033[K',f'{self.user_id}Stream ended')
break
elif data['name'] == 'channel_information':
if data['arguments'].get('fee') != 0:
print(f'\r\033[K{self.user_id}Stream requires a fee now.')
time.sleep(5)
break
elif data['name'] == 'media_disconnection':
if data.get('arguments').get('code') == 104:
print('\r\033[K','Disconnected. '
'Multiple connections has been detected.')
break
elif data.get('arguments').get('code'):
print('\r\033[K','error code {0}'.format(
data['arguments']['code']))
break
ws.close()
# WebSocket background process
try:
ws_ping()
except:
            print('\r\033[K', f'{self.user_id} ws_ping error')
t2 = Thread(target=ws_recv,daemon=True)
t2.daemon = True
t2.start()
# wait for the WebSocket
host_timeout = False
while self.host_found is False:
if self.host_found is True:
break
if self.count >= 30:
host_timeout = True
break
time.sleep(3)
sys.stdout.write('\rhost_timeout is {0}'.format(host_timeout))
if host_timeout:
return False
return True
def get_rtmp(self, data):
app = '{0}?media_token={1}'.format(
data['application'], data['media_token'])
host = data['host']
params = {
'app': app,
'flashVer': 'WIN 29,0,0,140',
'swfUrl': 'https://live.fc2.com/swf/liveVideo.swf',
'tcUrl': 'rtmp://{0}/{1}'.format(host, app),
'live': 'yes',
'pageUrl': self.url,
'playpath': data['play_rtmp_stream'],
'host': host,
}
yield 'live', RTMPStream(self.session, params)
def get_streams(self):
self.session.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'Referer': self.url
})
cookies_list = []
for k in dict_from_cookiejar(self.session.cookies):
cookies_list.append(k)
_authed = self.cmp_cookies_list(cookies_list)
'''
if _authed:
print('Attempting to authenticate using cached cookies')
elif (not _authed):
if not self.login():
print('Failed to login, check your username/password')
'''
user_id =self.user_id
version = self.get_version(user_id)
logintime=0
runtime=0
while(version == 'login'):
if runtime:
break
if logintime :
runtime+=1
os.system('cd /root/u;rm cookies.txt')
self.login()
version = self.get_version(user_id)
logintime+=1
if(version and version !='login'):
ws_url = self.get_ws_url(user_id, version)
if self.get_ws_data(ws_url):
sys.stdout.write('\r \033[K ok')
return True
return False
#return self.get_rtmp(self.host_data['arguments'])
def main(test=None):
global users
if not os.path.exists('cookies.txt'):
aa = requests.session()
aa.keep_alive = False
aa.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
})
x=FC2(983226,aa)
x.login()
if test:
r = requests.session()
r.keep_alive=False
a = FC2(int(test),r)
dodownload(a)
else:
while 1:
users=[]
fav = requests.session()
fav.keep_alive = False
fav.cookies = cj.LWPCookieJar(filename='cookies.txt')
fav.cookies.load(filename='cookies.txt',ignore_discard=True)
allres = fav.post('https://live.fc2.com/adult/contents/allchannellist.php').json()
channel = allres['channel']
try:
res = fav.post('https://live.fc2.com/adult/contents/favorite.php').json()
except:
traceback.print_exc()
time.sleep(10)
continue
if res['status'] == 0:
aa = requests.session()
aa.keep_alive = False
aa.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
})
x=FC2(983226,aa)
x.login(1)
else:
data = res['data']
for i in data:
if(i):
for ii in channel:
if ii['id'] == str(i):
if not ii['pay']:
r =requests.session()
r.keep_alive = False
a=FC2(int(i),r)
users.append(a)
break
'''if os.path.exists('fuser.txt'):
rr=open('fuser.txt','r')
for i in rr.readlines():
i=i.strip()
if i:
r =requests.session()
r.keep_alive = False
a=FC2(int(i),r)
users.append(a)
rr.close()'''
for a in users:
if not a.user_id in recording:
a.thread=t=Thread(target=dodownload,args=(a,),name=a.user_id,daemon=True)
t.start()
            sys.stdout.write(f'\r\033[KRecording {len(recording)} streams')
time.sleep(random.randint(5,10))
def dodownload(a):
if a.user_id in recording:
return
recording.append(a.user_id)
print(f'\r\033[K{a.user_id}start')
live=a.get_streams()
if live:
playlists=a.host_data['arguments']['playlists']
for i in playlists:
if i['mode'] == 0 or i['mode']== '0':
                print(f'\r\033[K{a.user_id} got master playlist {i["url"]}')
master = i['url']
#session = Streamlink()
session = Livestreamer()
if threads:
session.set_option('hls-segment-threads',int(threads))
if trytimes:
session.set_option('hls-segment-attempts',int(trytimes))
session.set_option('hls-live-edge',9999)
session.set_option('hls-segment-timeout',6.0)
session.set_option('hls-timeout',10.0)
session.set_loglevel("none")
#cmd = ['streamlink','hls://{}'.format(master),'best','-o','/root/te/t.ts']
#subprocess.call(cmd)
streams = session.streams('hlsvariant://'+master)
stream = streams["best"]
#print(stream.url)
error = 0
rstr = r"[\/\\\:\*\?\"\<\>\|\- \n]"
oname = a.profile_data['name']
otitle = a.channel_data['title']
name = re.sub(rstr,"_",oname)
title = re.sub(rstr,"_",otitle)
path = '/root/b/d/fc2/'+str(a.user_id)
if not os.path.exists(path):
os.makedirs(path)
userid = str(a.user_id)
while(not error):
if a.sameid == 0:
break
filename = path+'/'+userid+'-'+time.strftime('%y%m%d_%H%M%S')+'-'+name+'-'+title+'.ts'
if len(filename) >=130:
title = '_'
filename = path+'/'+userid+'-'+time.strftime('%y%m%d_%H%M%S')+'-'+name+'-'+title+'.ts'
fs = 0
try:
'''
cmd = ['ffmpeg','-loglevel','quiet','-y','-i',master,'-c','copy','-fs','1073741824',filename]
#cmd = ['ffmpeg','-y','-i',master,'-c','copy','-fs','1073741824',filename,'-loglevel','debug']
error=subprocess.call(cmd)
'''
fd = stream.open()
f = open(filename,'wb')
desize = 1024*1024*1024
while 1:
ddata = fd.read(8192)
if ddata:
fs+=f.write(ddata)
#if fs % 64 == 0:
# sys.stdout.write(f'\r\033[K正在录制{len(recording)}{name}{userid}---{round(fs/1024/1024,2)}m')
if fs>=desize:
fs=0
f.close()
                            print(f'\r\033[K{filename} reached the size limit, rotating to a new file')
shutil.move(filename,'/root/b/d/fc2')
filename = path+'/'+userid+'-'+time.strftime('%y%m%d_%H%M%S')+'-'+name+'-'+title+'.ts'
f = open(filename,'wb')
else:
                        print(f'{userid} stopped recording')
break
except Exception as e:
print(f'\r\033[K{a.user_id}',e)
#traceback.print_exc()
finally:
if 'fd' in locals():
fd.close()
if 'f' in locals():
f.close()
ff = os.path.getsize(filename)
if ff<=1024*100:
                    print(f'\r\033[K{userid} file download failed')
#cmd = ['ffmpeg','-y','-i',master,'-c','copy','-fs','1073741824',filename,'-loglevel','debug']
#error=subprocess.call(cmd)
shutil.move(filename,'/root/b/d/fc2')
os.rmdir(path)
break
#'''
print(error)
break
a.end = True
if a.user_id in recording:
        print(f'\r\033[K{a.user_id} removed from recording list')
recording.remove(a.user_id)
else:
        print(f'\r\033[K{a.user_id} not found in recording list: {recording}')
time.sleep(5)
del a
if __name__ =='__main__':
test=''#input('testid:')
if test:
main(test)
else:
main()
|
cisd.py
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Solve CISD equation H C = C e where e = E_HF + E_CORR
'''
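# Hedged usage sketch (not part of the original file): the public entry point is
# pyscf.ci.CISD, which wraps this module for a restricted HF reference, e.g.
#   from pyscf import gto, scf, ci
#   mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='cc-pvdz')
#   mf = scf.RHF(mol).run()
#   myci = ci.CISD(mf).run()   # correlation energy in myci.e_corr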
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import ccsd_rdm
from pyscf.fci import cistring
from pyscf import __config__
BLKMIN = getattr(__config__, 'ci_cisd_blkmin', 4)
def kernel(myci, eris, ci0=None, max_cycle=50, tol=1e-8, verbose=logger.INFO):
log = logger.new_logger(myci, verbose)
mol = myci.mol
diag = myci.make_diagonal(eris)
ehf = diag[0]
diag -= ehf
if ci0 is None:
ci0 = myci.get_init_guess(eris=eris, nroots=myci.nroots, diag=diag)[1]
def op(xs):
return [myci.contract(x, eris) for x in xs]
def precond(x, e, *args):
diagd = diag - (e-myci.level_shift)
diagd[abs(diagd)<1e-8] = 1e-8
return x / diagd
if myci._dot is not None:
nmo = myci.nmo
nocc = myci.nocc
def cisd_dot(x1, x2):
return myci._dot(x1, x2, nmo, nocc)
else:
cisd_dot = numpy.dot
conv, ecisd, ci = lib.davidson1(op, ci0, precond, tol=tol,
max_cycle=max_cycle, max_space=myci.max_space,
lindep=myci.lindep, dot=cisd_dot,
nroots=myci.nroots, verbose=log)
if myci.nroots == 1:
conv = conv[0]
ecisd = ecisd[0]
ci = ci[0]
return conv, ecisd, ci
def make_diagonal(myci, eris):
    # DO NOT use eris.mo_energy, it may differ from eris.fock.diagonal()
mo_energy = eris.fock.diagonal()
nmo = mo_energy.size
jdiag = numpy.zeros((nmo,nmo))
kdiag = numpy.zeros((nmo,nmo))
eris_oooo = _cp(eris.oooo)
nocc = eris.nocc
nvir = nmo - nocc
jdiag[:nocc,:nocc] = numpy.einsum('iijj->ij', eris.oooo)
kdiag[:nocc,:nocc] = numpy.einsum('jiij->ij', eris.oooo)
jdiag[:nocc,nocc:] = numpy.einsum('iijj->ij', eris.oovv)
kdiag[:nocc,nocc:] = numpy.einsum('ijji->ij', eris.ovvo)
if eris.vvvv is not None and len(eris.vvvv.shape) == 2:
#:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvir)
#:jdiag1 = numpy.einsum('iijj->ij', eris_vvvv)
diag_idx = numpy.arange(nvir)
diag_idx = diag_idx * (diag_idx + 1) // 2 + diag_idx
for i, ii in enumerate(diag_idx):
jdiag[nocc+i,nocc:] = eris.vvvv[ii][diag_idx]
jksum = (jdiag[:nocc,:nocc] * 2 - kdiag[:nocc,:nocc]).sum()
ehf = mo_energy[:nocc].sum() * 2 - jksum
e_ia = lib.direct_sum('a-i->ia', mo_energy[nocc:], mo_energy[:nocc])
e_ia -= jdiag[:nocc,nocc:] - kdiag[:nocc,nocc:]
e1diag = ehf + e_ia
e2diag = lib.direct_sum('ia+jb->ijab', e_ia, e_ia)
e2diag += ehf
e2diag += jdiag[:nocc,:nocc].reshape(nocc,nocc,1,1)
e2diag -= jdiag[:nocc,nocc:].reshape(nocc,1,1,nvir)
e2diag -= jdiag[:nocc,nocc:].reshape(1,nocc,nvir,1)
e2diag += jdiag[nocc:,nocc:].reshape(1,1,nvir,nvir)
return numpy.hstack((ehf, e1diag.reshape(-1), e2diag.reshape(-1)))
def contract(myci, civec, eris):
    time0 = time.process_time(), time.time()  # (CPU time, wall time); time.clock was removed in Python 3.8
log = logger.Logger(myci.stdout, myci.verbose)
nocc = myci.nocc
nmo = myci.nmo
nvir = nmo - nocc
nov = nocc * nvir
noo = nocc**2
c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
t2 = myci._add_vvvv(c2, eris, t2sym='jiba')
t2 *= .5 # due to t2+t2.transpose(1,0,3,2) in the end
time1 = log.timer_debug1('vvvv', *time0)
foo = eris.fock[:nocc,:nocc].copy()
fov = eris.fock[:nocc,nocc:].copy()
fvv = eris.fock[nocc:,nocc:].copy()
t1 = fov * c0
t1 += numpy.einsum('ib,ab->ia', c1, fvv)
t1 -= numpy.einsum('ja,ji->ia', c1, foo)
t2 += lib.einsum('kilj,klab->ijab', _cp(eris.oooo)*.5, c2)
t2 += lib.einsum('ijac,bc->ijab', c2, fvv)
t2 -= lib.einsum('kj,kiba->jiba', foo, c2)
t2 += numpy.einsum('ia,jb->ijab', c1, fov)
unit = nocc*nvir**2 + nocc**2*nvir*3 + 1
max_memory = max(0, myci.max_memory - lib.current_memory()[0])
blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
max_memory, nocc, nvir, blksize)
nvir_pair = nvir * (nvir+1) // 2
for p0, p1 in lib.prange(0, nvir, blksize):
eris_oVoV = _cp(_cp(eris.oovv[:,:,p0:p1]).transpose(0,2,1,3))
tmp = lib.einsum('kbjc,ikca->jiba', eris_oVoV, c2)
t2[:,:,p0:p1] -= tmp*.5
t2[:,:,p0:p1] -= tmp.transpose(1,0,2,3)
tmp = None
eris_ovvo = _cp(eris.ovvo[:,p0:p1])
t2[:,:,p0:p1] += eris_ovvo.transpose(0,3,1,2) * (c0*.5)
t1 += numpy.einsum('ia,iabj->jb', c1[:,p0:p1], eris_ovvo) * 2
t1[:,p0:p1] -= numpy.einsum('ib,iajb->ja', c1, eris_oVoV)
ovov = -.5 * eris_oVoV
ovov += eris_ovvo.transpose(3,1,0,2)
eris_oVoV = eris_oovv = None
theta = c2[:,:,p0:p1].transpose(2,0,1,3) * 2
theta-= c2[:,:,p0:p1].transpose(2,1,0,3)
for j in range(nocc):
t2[:,j] += lib.einsum('ckb,ckia->iab', ovov[j], theta)
tmp = ovov = None
t1 += numpy.einsum('aijb,ia->jb', theta, fov[:,p0:p1])
eris_ovoo = _cp(eris.ovoo[:,p0:p1])
t1 -= lib.einsum('bjka,jbki->ia', theta, eris_ovoo)
t2[:,:,p0:p1] -= lib.einsum('jbik,ka->jiba', eris_ovoo.conj(), c1)
eris_vooo = None
eris_ovvv = eris.get_ovvv(slice(None), slice(p0,p1)).conj()
t1 += lib.einsum('cjib,jcba->ia', theta, eris_ovvv)
t2[:,:,p0:p1] += lib.einsum('iacb,jc->ijab', eris_ovvv, c1)
tmp = eris_ovvv = None
#:t2 + t2.transpose(1,0,3,2)
for i in range(nocc):
if i > 0:
t2[i,:i]+= t2[:i,i].transpose(0,2,1)
t2[:i,i] = t2[i,:i].transpose(0,2,1)
t2[i,i] = t2[i,i] + t2[i,i].T
t0 = numpy.einsum('ia,ia->', fov, c1) * 2
t0 += numpy.einsum('iabj,ijab->', eris.ovvo, c2) * 2
t0 -= numpy.einsum('iabj,jiab->', eris.ovvo, c2)
cinew = numpy.hstack((t0, t1.ravel(), t2.ravel()))
return cinew
def amplitudes_to_cisdvec(c0, c1, c2):
return numpy.hstack((c0, c1.ravel(), c2.ravel()))
def cisdvec_to_amplitudes(civec, nmo, nocc):
nvir = nmo - nocc
c0 = civec[0]
c1 = civec[1:nocc*nvir+1].reshape(nocc,nvir)
c2 = civec[nocc*nvir+1:].reshape(nocc,nocc,nvir,nvir)
return c0, c1, c2
def dot(v1, v2, nmo, nocc):
nvir = nmo - nocc
hijab = v2[1+nocc*nvir:].reshape(nocc,nocc,nvir,nvir)
cijab = v1[1+nocc*nvir:].reshape(nocc,nocc,nvir,nvir)
val = numpy.dot(v1, v2) * 2 - v1[0]*v2[0]
val-= numpy.einsum('jiab,ijab->', cijab, hijab)
return val
def t1strs(norb, nelec):
'''Compute the FCI strings (address) for CIS single-excitation amplitudes
and the signs of the coefficients when transferring the reference from
physics vacuum to HF vacuum.
'''
addrs, signs = tn_addrs_signs(norb, nelec, 1)
return addrs, signs
def tn_addrs_signs(norb, nelec, n_excite):
'''Compute the FCI strings (address) for CIS n-excitation amplitudes and
the signs of the coefficients when transferring the reference from physics
vacuum to HF vacuum.
'''
if n_excite > nelec:
print("Warning: Not enough occupied orbitals to excite.")
return [0], [0]
nocc = nelec
hole_strs = cistring.gen_strings4orblist(range(nocc), nocc - n_excite)
# For HF vacuum, hole operators are ordered from low-lying to high-lying
# orbitals. It leads to the opposite string ordering.
hole_strs = hole_strs[::-1]
hole_sum = numpy.zeros(len(hole_strs), dtype=int)
for i in range(nocc):
hole_at_i = (hole_strs & (1<<i)) == 0
hole_sum[hole_at_i] += i
# The hole operators are listed from low-lying to high-lying orbitals
    # (from left to right). For the i-th (0-based) hole operator, the number of
    # orbitals that are higher than i determines the sign. This number
    # equals nocc-(i+1). After removing the highest hole operator, nocc
    # becomes nocc-1, and the sign for the next hole operator j is associated with
    # nocc-1-(j+1). By applying this procedure iteratively, the overall sign
# for annihilating three holes is (-1)**(3*nocc - 6 - sum i)
sign = (-1) ** (n_excite * nocc - n_excite*(n_excite+1)//2 - hole_sum)
particle_strs = cistring.gen_strings4orblist(range(nocc, norb), n_excite)
strs = hole_strs[:,None] ^ particle_strs
addrs = cistring.strs2addr(norb, nocc, strs.ravel())
signs = numpy.vstack([sign] * len(particle_strs)).T.ravel()
return addrs, signs
def to_fcivec(cisdvec, norb, nelec, frozen=0):
'''Convert CISD coefficients to FCI coefficients'''
if isinstance(nelec, (int, numpy.number)):
nelecb = nelec//2
neleca = nelec - nelecb
else:
neleca, nelecb = nelec
assert(neleca == nelecb)
frozen_mask = numpy.zeros(norb, dtype=bool)
if isinstance(frozen, (int, numpy.integer)):
nfroz = frozen
frozen_mask[:frozen] = True
else:
nfroz = len(frozen)
frozen_mask[frozen] = True
nocc = numpy.count_nonzero(~frozen_mask[:neleca])
nmo = norb - nfroz
nvir = nmo - nocc
c0, c1, c2 = cisdvec_to_amplitudes(cisdvec, nmo, nocc)
t1addr, t1sign = tn_addrs_signs(nmo, nocc, 1)
na = cistring.num_strings(nmo, nocc)
fcivec = numpy.zeros((na,na))
fcivec[0,0] = c0
fcivec[0,t1addr] = fcivec[t1addr,0] = c1.ravel() * t1sign
c2ab = c2.transpose(0,2,1,3).reshape(nocc*nvir,-1)
c2ab = numpy.einsum('i,j,ij->ij', t1sign, t1sign, c2ab)
fcivec[t1addr[:,None],t1addr] = c2ab
if nocc > 1 and nvir > 1:
c2aa = c2 - c2.transpose(1,0,2,3)
ooidx = numpy.tril_indices(nocc, -1)
vvidx = numpy.tril_indices(nvir, -1)
c2aa = c2aa[ooidx][:,vvidx[0],vvidx[1]]
t2addr, t2sign = tn_addrs_signs(nmo, nocc, 2)
fcivec[0,t2addr] = fcivec[t2addr,0] = c2aa.ravel() * t2sign
if nfroz == 0:
return fcivec
assert(norb < 63)
strs = cistring.gen_strings4orblist(range(norb), neleca)
na = len(strs)
count = numpy.zeros(na, dtype=int)
parity = numpy.zeros(na, dtype=bool)
core_mask = numpy.ones(na, dtype=bool)
    # During the loop, count saves the number of occupied orbitals that lie
    # lower (i.e. have a smaller orbital ID) than the present orbital i.
# Moving all the frozen orbitals to the beginning of the orbital list
# (before the occupied orbitals) leads to parity odd (= True, with
# negative sign) or even (= False, with positive sign).
for i in range(norb):
if frozen_mask[i]:
if i < neleca:
# frozen occupied orbital should be occupied
core_mask &= (strs & (1<<i)) != 0
parity ^= (count & 1) == 1
else:
# frozen virtual orbital should not be occupied.
# parity is not needed since it's unoccupied
core_mask &= (strs & (1<<i)) == 0
else:
count += (strs & (1<<i)) != 0
sub_strs = strs[core_mask & (count == nocc)]
addrs = cistring.strs2addr(norb, neleca, sub_strs)
fcivec1 = numpy.zeros((na,na))
fcivec1[addrs[:,None],addrs] = fcivec
fcivec1[parity,:] *= -1
fcivec1[:,parity] *= -1
return fcivec1
def from_fcivec(ci0, norb, nelec, frozen=0):
'''Extract CISD coefficients from FCI coefficients'''
    if frozen != 0:
raise NotImplementedError
if isinstance(nelec, (int, numpy.number)):
nelecb = nelec//2
neleca = nelec - nelecb
else:
neleca, nelecb = nelec
nocc = neleca
nvir = norb - nocc
t1addr, t1sign = t1strs(norb, nocc)
c0 = ci0[0,0]
c1 = ci0[0,t1addr] * t1sign
c2 = numpy.einsum('i,j,ij->ij', t1sign, t1sign, ci0[t1addr[:,None],t1addr])
c1 = c1.reshape(nocc,nvir)
c2 = c2.reshape(nocc,nvir,nocc,nvir).transpose(0,2,1,3)
return amplitudes_to_cisdvec(c0, c1, c2)
def overlap(cibra, ciket, nmo, nocc, s=None):
'''Overlap between two CISD wavefunctions.
Args:
s : 2D array
The overlap matrix of non-orthogonal one-particle basis
'''
if s is None:
return dot(cibra, ciket, nmo, nocc)
DEBUG = True
nvir = nmo - nocc
nov = nocc * nvir
bra0, bra1, bra2 = cisdvec_to_amplitudes(cibra, nmo, nocc)
ket0, ket1, ket2 = cisdvec_to_amplitudes(ciket, nmo, nocc)
    # Sort the ket orbitals so that the orbitals in bra map one-to-one onto
    # the orbitals in ket.
if ((not DEBUG) and
abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
ket_orb_idx = numpy.where(abs(s) > 0.9)[1]
s = s[:,ket_orb_idx]
oidx = ket_orb_idx[:nocc]
vidx = ket_orb_idx[nocc:] - nocc
ket1 = ket1[oidx[:,None],vidx]
ket2 = ket2[oidx[:,None,None,None],oidx[:,None,None],vidx[:,None],vidx]
ooidx = numpy.tril_indices(nocc, -1)
vvidx = numpy.tril_indices(nvir, -1)
bra2aa = bra2 - bra2.transpose(1,0,2,3)
bra2aa = lib.take_2d(bra2aa.reshape(nocc**2,nvir**2),
ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
ket2aa = ket2 - ket2.transpose(1,0,2,3)
ket2aa = lib.take_2d(ket2aa.reshape(nocc**2,nvir**2),
ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
occlist0 = numpy.arange(nocc).reshape(1,nocc)
occlists = numpy.repeat(occlist0, 1+nov+bra2aa.size, axis=0)
occlist0 = occlists[:1]
occlist1 = occlists[1:1+nov]
occlist2 = occlists[1+nov:]
ia = 0
for i in range(nocc):
for a in range(nocc, nmo):
occlist1[ia,i] = a
ia += 1
ia = 0
for i in range(nocc):
for j in range(i):
for a in range(nocc, nmo):
for b in range(nocc, a):
occlist2[ia,i] = a
occlist2[ia,j] = b
ia += 1
na = len(occlists)
if DEBUG:
trans = numpy.empty((na,na))
for i, idx in enumerate(occlists):
s_sub = s[idx].T.copy()
minors = s_sub[occlists]
trans[i,:] = numpy.linalg.det(minors)
# Mimic the transformation einsum('ab,ap->pb', FCI, trans).
# The wavefunction FCI has the [excitation_alpha,excitation_beta]
# representation. The zero blocks like FCI[S_alpha,D_beta],
# FCI[D_alpha,D_beta], are explicitly excluded.
bra_mat = numpy.zeros((na,na))
bra_mat[0,0] = bra0
bra_mat[0,1:1+nov] = bra_mat[1:1+nov,0] = bra1.ravel()
bra_mat[0,1+nov:] = bra_mat[1+nov:,0] = bra2aa.ravel()
bra_mat[1:1+nov,1:1+nov] = bra2.transpose(0,2,1,3).reshape(nov,nov)
ket_mat = numpy.zeros((na,na))
ket_mat[0,0] = ket0
ket_mat[0,1:1+nov] = ket_mat[1:1+nov,0] = ket1.ravel()
ket_mat[0,1+nov:] = ket_mat[1+nov:,0] = ket2aa.ravel()
ket_mat[1:1+nov,1:1+nov] = ket2.transpose(0,2,1,3).reshape(nov,nov)
ovlp = lib.einsum('ab,ap,bq,pq->', bra_mat, trans, trans, ket_mat)
else:
nov1 = 1 + nov
noovv = bra2aa.size
bra_SS = numpy.zeros((nov1,nov1))
bra_SS[0,0] = bra0
bra_SS[0,1:] = bra_SS[1:,0] = bra1.ravel()
bra_SS[1:,1:] = bra2.transpose(0,2,1,3).reshape(nov,nov)
ket_SS = numpy.zeros((nov1,nov1))
ket_SS[0,0] = ket0
ket_SS[0,1:] = ket_SS[1:,0] = ket1.ravel()
ket_SS[1:,1:] = ket2.transpose(0,2,1,3).reshape(nov,nov)
trans_SS = numpy.empty((nov1,nov1))
trans_SD = numpy.empty((nov1,noovv))
trans_DS = numpy.empty((noovv,nov1))
occlist01 = occlists[:nov1]
for i, idx in enumerate(occlist01):
s_sub = s[idx].T.copy()
minors = s_sub[occlist01]
trans_SS[i,:] = numpy.linalg.det(minors)
minors = s_sub[occlist2]
trans_SD[i,:] = numpy.linalg.det(minors)
s_sub = s[:,idx].copy()
minors = s_sub[occlist2]
trans_DS[:,i] = numpy.linalg.det(minors)
ovlp = lib.einsum('ab,ap,bq,pq->', bra_SS, trans_SS, trans_SS, ket_SS)
ovlp+= lib.einsum('ab,a ,bq, q->', bra_SS, trans_SS[:,0], trans_SD, ket2aa.ravel())
ovlp+= lib.einsum('ab,ap,b ,p ->', bra_SS, trans_SD, trans_SS[:,0], ket2aa.ravel())
ovlp+= lib.einsum(' b, p,bq,pq->', bra2aa.ravel(), trans_SS[0,:], trans_DS, ket_SS)
ovlp+= lib.einsum(' b, p,b ,p ->', bra2aa.ravel(), trans_SD[0,:], trans_DS[:,0], ket2aa.ravel())
ovlp+= lib.einsum('a ,ap, q,pq->', bra2aa.ravel(), trans_DS, trans_SS[0,:], ket_SS)
ovlp+= lib.einsum('a ,a , q, q->', bra2aa.ravel(), trans_DS[:,0], trans_SD[0,:], ket2aa.ravel())
# FIXME: whether to approximate the overlap between double excitation coefficients
if numpy.linalg.norm(bra2aa)*numpy.linalg.norm(ket2aa) < 1e-4:
# Skip the overlap if coefficients of double excitation are small enough
pass
        elif (abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
# If the overlap matrix close to identity enough, use the <D|D'> overlap
# for orthogonal single-particle basis to approximate the overlap
# for non-orthogonal basis.
ovlp+= numpy.dot(bra2aa.ravel(), ket2aa.ravel()) * trans_SS[0,0] * 2
else:
from multiprocessing import sharedctypes, Process
buf_ctypes = sharedctypes.RawArray('d', noovv)
trans_ket = numpy.ndarray(noovv, buffer=buf_ctypes)
def trans_dot_ket(i0, i1):
for i in range(i0, i1):
s_sub = s[occlist2[i]].T.copy()
minors = s_sub[occlist2]
trans_ket[i] = numpy.linalg.det(minors).dot(ket2aa.ravel())
nproc = lib.num_threads()
if nproc > 1:
seg = (noovv+nproc-1) // nproc
ps = []
for i0,i1 in lib.prange(0, noovv, seg):
p = Process(target=trans_dot_ket, args=(i0,i1))
ps.append(p)
p.start()
[p.join() for p in ps]
else:
trans_dot_ket(0, noovv)
ovlp+= numpy.dot(bra2aa.ravel(), trans_ket) * trans_SS[0,0] * 2
return ovlp
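# A minimal usage sketch for the non-orthogonal case (assumptions: `mf1` and `mf2`
# are two converged RHF objects on the same molecule and `myci1`/`myci2` the
# corresponding CISD solutions):
#     s_ao = gto.intor_cross('int1e_ovlp', mf1.mol, mf2.mol)
#     s_mo = reduce(numpy.dot, (mf1.mo_coeff.conj().T, s_ao, mf2.mo_coeff))
#     ovlp_12 = overlap(myci1.ci, myci2.ci, myci1.nmo, myci1.nocc, s_mo)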
def make_rdm1(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
    r'''
Spin-traced one-particle density matrix in MO basis (the occupied-virtual
blocks from the orbital response contribution are not included).
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
return ccsd_rdm._make_rdm1(myci, d1, with_frozen=True, ao_repr=ao_repr)
def make_rdm2(myci, civec=None, nmo=None, nocc=None):
r'''
Spin-traced two-particle density matrix in MO basis
dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>
Note the contraction between ERIs (in Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
f = lib.H5TmpFile()
d2 = _gamma2_outcore(myci, civec, nmo, nocc, f, False)
return ccsd_rdm._make_rdm2(myci, d1, d2, with_dm1=True, with_frozen=True)
def _gamma1_intermediates(myci, civec, nmo, nocc):
c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
dvo = c0.conj() * c1.T
dvo += numpy.einsum('jb,ijab->ai', c1.conj(), c2) * 2
dvo -= numpy.einsum('jb,ijba->ai', c1.conj(), c2)
dov = dvo.T.conj()
theta = c2*2 - c2.transpose(0,1,3,2)
doo =-numpy.einsum('ia,ka->ik', c1.conj(), c1)
doo -= lib.einsum('ijab,ikab->jk', c2.conj(), theta)
dvv = numpy.einsum('ia,ic->ac', c1, c1.conj())
dvv += lib.einsum('ijab,ijac->bc', theta, c2.conj())
return doo, dov, dvo, dvv
def _gamma2_intermediates(myci, civec, nmo, nocc, compress_vvvv=False):
f = lib.H5TmpFile()
_gamma2_outcore(myci, civec, nmo, nocc, f, compress_vvvv)
d2 = (f['dovov'].value, f['dvvvv'].value, f['doooo'].value, f['doovv'].value,
f['dovvo'].value, None, f['dovvv'].value, f['dooov'].value)
return d2
def _gamma2_outcore(myci, civec, nmo, nocc, h5fobj, compress_vvvv=False):
log = logger.Logger(myci.stdout, myci.verbose)
nocc = myci.nocc
nmo = myci.nmo
nvir = nmo - nocc
nvir_pair = nvir * (nvir+1) // 2
c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
h5fobj['dovov'] = (2*c0*c2.conj().transpose(0,2,1,3) -
c0*c2.conj().transpose(1,2,0,3))
doooo = lib.einsum('ijab,klab->ijkl', c2.conj(), c2)
h5fobj['doooo'] = doooo.transpose(0,2,1,3) - doooo.transpose(1,2,0,3)*.5
doooo = None
dooov =-lib.einsum('ia,klac->klic', c1*2, c2.conj())
h5fobj['dooov'] = dooov.transpose(0,2,1,3)*2 - dooov.transpose(1,2,0,3)
dooov = None
#:dvovv = numpy.einsum('ia,ikcd->akcd', c1, c2) * 2
#:dvvvv = lib.einsum('ijab,ijcd->abcd', c2, c2)
max_memory = max(0, myci.max_memory - lib.current_memory()[0])
unit = max(nocc**2*nvir*2+nocc*nvir**2*3 + 1, nvir**3*2+nocc*nvir**2 + 1)
blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
iobuflen = int(256e6/8/blksize)
    log.debug1('rdm intermediates: block size = %d, nvir = %d in %d blocks',
               blksize, nvir, int((nvir+blksize-1)/blksize))
dtype = numpy.result_type(civec).char
dovvv = h5fobj.create_dataset('dovvv', (nocc,nvir,nvir,nvir), dtype,
chunks=(nocc,min(nocc,nvir),1,nvir))
if compress_vvvv:
dvvvv = h5fobj.create_dataset('dvvvv', (nvir_pair,nvir_pair), dtype)
else:
dvvvv = h5fobj.create_dataset('dvvvv', (nvir,nvir,nvir,nvir), dtype)
for istep, (p0, p1) in enumerate(lib.prange(0, nvir, blksize)):
theta = c2[:,:,p0:p1] - c2[:,:,p0:p1].transpose(1,0,2,3) * .5
gvvvv = lib.einsum('ijab,ijcd->abcd', theta.conj(), c2)
if compress_vvvv:
# symmetrize dvvvv because it does not affect the results of cisd_grad
# dvvvv = (dvvvv+dvvvv.transpose(0,1,3,2)) * .5
# dvvvv = (dvvvv+dvvvv.transpose(1,0,2,3)) * .5
# now dvvvv == dvvvv.transpose(0,1,3,2) == dvvvv.transpose(1,0,3,2)
tmp = numpy.empty((nvir,nvir,nvir))
tmpvvvv = numpy.empty((p1-p0,nvir,nvir_pair))
for i in range(p1-p0):
tmp[:] = gvvvv[i].conj().transpose(1,0,2)
lib.pack_tril(tmp+tmp.transpose(0,2,1), out=tmpvvvv[i])
# tril of (dvvvv[p0:p1,p0:p1]+dvvvv[p0:p1,p0:p1].T)
for i in range(p0, p1):
for j in range(p0, i):
tmpvvvv[i-p0,j] += tmpvvvv[j-p0,i]
tmpvvvv[i-p0,i] *= 2
for i in range(p1, nvir):
off = i * (i+1) // 2
dvvvv[off+p0:off+p1] = tmpvvvv[:,i]
for i in range(p0, p1):
off = i * (i+1) // 2
if p0 > 0:
tmpvvvv[i-p0,:p0] += dvvvv[off:off+p0]
dvvvv[off:off+i+1] = tmpvvvv[i-p0,:i+1] * .25
tmp = tmpvvvv = None
else:
for i in range(p0, p1):
dvvvv[i] = gvvvv[i-p0].conj().transpose(1,0,2)
gvovv = numpy.einsum('ia,ikcd->akcd', c1[:,p0:p1].conj()*2, c2)
gvovv = gvovv.conj()
dovvv[:,:,p0:p1] = gvovv.transpose(1,3,0,2)*2 - gvovv.transpose(1,2,0,3)
theta = c2*2 - c2.transpose(1,0,2,3)
doovv = numpy.einsum('ia,kc->ikca', c1.conj(), -c1)
doovv -= lib.einsum('kjcb,kica->jiab', c2.conj(), theta)
doovv -= lib.einsum('ikcb,jkca->ijab', c2.conj(), theta)
h5fobj['doovv'] = doovv
doovv = None
dovvo = lib.einsum('ikac,jkbc->iabj', theta.conj(), theta)
dovvo += numpy.einsum('ia,kc->iack', c1.conj(), c1) * 2
h5fobj['dovvo'] = dovvo
theta = dovvo = None
dvvov = None
return (h5fobj['dovov'], h5fobj['dvvvv'], h5fobj['doooo'], h5fobj['doovv'],
h5fobj['dovvo'], dvvov , h5fobj['dovvv'], h5fobj['dooov'])
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
    r'''
Spin-traced one-particle transition density matrix in MO basis.
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra, nmo, nocc)
c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket, nmo, nocc)
dvo = c0bra.conj() * c1ket.T
dvo += numpy.einsum('jb,ijab->ai', c1bra.conj(), c2ket) * 2
dvo -= numpy.einsum('jb,ijba->ai', c1bra.conj(), c2ket)
dov = c0ket * c1bra.conj()
dov += numpy.einsum('jb,ijab->ia', c1ket, c2bra.conj()) * 2
dov -= numpy.einsum('jb,ijba->ia', c1ket, c2bra.conj())
theta = c2ket*2 - c2ket.transpose(0,1,3,2)
doo =-numpy.einsum('ia,ka->ik', c1bra.conj(), c1ket)
doo -= lib.einsum('ijab,ikab->jk', c2bra.conj(), theta)
dvv = numpy.einsum('ia,ic->ac', c1ket, c1bra.conj())
dvv += lib.einsum('ijab,ijac->bc', theta, c2bra.conj())
dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
dm1[:nocc,:nocc] = doo * 2
dm1[:nocc,nocc:] = dov * 2
dm1[nocc:,:nocc] = dvo * 2
dm1[nocc:,nocc:] = dvv * 2
norm = dot(cibra, ciket, nmo, nocc)
dm1[numpy.diag_indices(nocc)] += 2 * norm
    if not (myci.frozen is None or myci.frozen == 0):
nmo = myci.mo_occ.size
nocc = numpy.count_nonzero(myci.mo_occ > 0)
rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
rdm1[numpy.diag_indices(nocc)] = 2 * norm
moidx = numpy.where(myci.get_frozen_mask())[0]
rdm1[moidx[:,None],moidx] = dm1
dm1 = rdm1
return dm1
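# A hedged usage sketch (assumptions: `mf` is the underlying RHF object,
# `mo = mf.mo_coeff`, and `dip_ao = mf.mol.intor('int1e_r')` gives the AO dipole
# integrals); a transition dipole then follows the same contraction convention as
# the energy above:
#     dip_mo = lib.einsum('ui,xuv,vj->xij', mo.conj(), dip_ao, mo)
#     trans_dip = numpy.einsum('xpq,qp->x', dip_mo, trans_rdm1(myci, cibra, ciket))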
def as_scanner(ci):
'''Generating a scanner/solver for CISD PES.
The returned solver is a function. This function requires one argument
"mol" as input and returns total CISD energy.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
CISD and the underlying SCF objects (conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, ci
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> ci_scanner = ci.CISD(scf.RHF(mol)).as_scanner()
>>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
from pyscf import gto
if isinstance(ci, lib.SinglePointScanner):
return ci
logger.info(ci, 'Set %s as a scanner', ci.__class__)
class CISD_Scanner(ci.__class__, lib.SinglePointScanner):
def __init__(self, ci):
self.__dict__.update(ci.__dict__)
self._scf = ci._scf.as_scanner()
def __call__(self, mol_or_geom, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
mf_scanner = self._scf
mf_scanner(mol)
self.mol = mol
self.mo_coeff = mf_scanner.mo_coeff
self.mo_occ = mf_scanner.mo_occ
# FIXME: Whether to use the initial guess from last step? If root flips, large
# errors may be found in the solutions
self.kernel(self.ci, **kwargs)[0]
return self.e_tot
return CISD_Scanner(ci)
class CISD(lib.StreamObject):
'''restricted CISD
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
        conv_tol : float
            Convergence threshold.  Default is 1e-9.
max_cycle : int
max number of iterations. Default is 50.
max_space : int
Davidson diagonalization space size. Default is 12.
direct : bool
AO-direct CISD. Default is False.
async_io : bool
Allow for asynchronous function execution. Default is True.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CI
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CI calculation.
>>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
>>> mf = scf.RHF(mol).run()
>>> # freeze 2 core orbitals
>>> myci = ci.CISD(mf).set(frozen = 2).run()
>>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
>>> myci.set(frozen = [0,1,16,17,18]).run()
Saved results
converged : bool
CISD converged or not
e_corr : float
CISD correlation correction
        e_tot : float
            Total CISD energy (HF + correlation)
ci :
CI wavefunction coefficients
'''
conv_tol = getattr(__config__, 'ci_cisd_CISD_conv_tol', 1e-9)
max_cycle = getattr(__config__, 'ci_cisd_CISD_max_cycle', 50)
max_space = getattr(__config__, 'ci_cisd_CISD_max_space', 12)
lindep = getattr(__config__, 'ci_cisd_CISD_lindep', 1e-14)
level_shift = getattr(__config__, 'ci_cisd_CISD_level_shift', 0) # in preconditioner
direct = getattr(__config__, 'ci_cisd_CISD_direct', False)
async_io = getattr(__config__, 'ci_cisd_CISD_async_io', True)
def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None):
if 'dft' in str(mf.__module__):
raise RuntimeError('CISD Warning: The first argument mf is a DFT object. '
'CISD calculation should be initialized with HF object.\n'
'DFT object can be converted to HF object with '
'the code below:\n'
' mf_hf = scf.RHF(mol)\n'
' mf_hf.__dict__.update(mf_dft.__dict__)\n')
if mo_coeff is None: mo_coeff = mf.mo_coeff
if mo_occ is None: mo_occ = mf.mo_occ
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
self.nroots = 1
self.frozen = frozen
self.chkfile = mf.chkfile
##################################################
# don't modify the following attributes, they are not input options
self.converged = False
self.mo_coeff = mo_coeff
self.mo_occ = mo_occ
self.e_corr = None
self.emp2 = None
self.ci = None
self._nocc = None
self._nmo = None
keys = set(('conv_tol', 'max_cycle', 'max_space', 'lindep',
'level_shift', 'direct'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('CISD nocc = %s, nmo = %s', self.nocc, self.nmo)
        if self.frozen != 0:
log.info('frozen orbitals %s', str(self.frozen))
        log.info('direct = %d', self.direct)
        log.info('conv_tol = %g', self.conv_tol)
        log.info('max_cycle = %d', self.max_cycle)
        log.info('max_space = %d', self.max_space)
        log.info('lindep = %g', self.lindep)
log.info('nroots = %d', self.nroots)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
@property
def e_tot(self):
return numpy.asarray(self.e_corr) + self._scf.e_tot
@property
def nstates(self):
return self.nroots
@nstates.setter
def nstates(self, x):
self.nroots = x
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
def vector_size(self):
'''The size of the vector which was returned from
:func:`amplitudes_to_cisdvec`
'''
nocc = self.nocc
nvir = self.nmo - nocc
return 1 + nocc*nvir + (nocc*nvir)**2
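    # For example, with nocc = 5 and nmo = 15 (nvir = 10) the CISD vector holds
    # 1 + 5*10 + (5*10)**2 = 2551 coefficients.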
get_nocc = ccsd.get_nocc
get_nmo = ccsd.get_nmo
get_frozen_mask = ccsd.get_frozen_mask
def kernel(self, ci0=None, eris=None):
return self.cisd(ci0, eris)
def cisd(self, ci0=None, eris=None):
if eris is None:
eris = self.ao2mo(self.mo_coeff)
if self.verbose >= logger.WARN:
self.check_sanity()
self.dump_flags()
self.converged, self.e_corr, self.ci = \
kernel(self, eris, ci0, max_cycle=self.max_cycle,
tol=self.conv_tol, verbose=self.verbose)
self._finalize()
return self.e_corr, self.ci
def _finalize(self):
citype = self.__class__.__name__
if numpy.all(self.converged):
logger.info(self, '%s converged', citype)
else:
logger.info(self, '%s not converged', citype)
if self.nroots > 1:
for i,e in enumerate(self.e_tot):
logger.note(self, '%s root %d E = %.16g', citype, i, e)
else:
logger.note(self, 'E(%s) = %.16g E_corr = %.16g',
citype, self.e_tot, self.e_corr)
return self
def get_init_guess(self, eris=None, nroots=1, diag=None):
# MP2 initial guess
if eris is None: eris = self.ao2mo(self.mo_coeff)
nocc = self.nocc
mo_e = eris.mo_energy
e_ia = lib.direct_sum('i-a->ia', mo_e[:nocc], mo_e[nocc:])
ci0 = 1
ci1 = eris.fock[:nocc,nocc:] / e_ia
eris_ovvo = _cp(eris.ovvo)
ci2 = 2 * eris_ovvo.transpose(0,3,1,2)
ci2 -= eris_ovvo.transpose(0,3,2,1)
ci2 /= lib.direct_sum('ia,jb->ijab', e_ia, e_ia)
self.emp2 = numpy.einsum('ijab,iabj', ci2, eris_ovvo)
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
if abs(self.emp2) < 1e-3 and abs(ci1).sum() < 1e-3:
# To avoid ci1 being stuck at local minimum
ci1 = 1e-1 / e_ia
ci_guess = amplitudes_to_cisdvec(ci0, ci1, ci2)
if nroots > 1:
civec_size = ci_guess.size
dtype = ci_guess.dtype
nroots = min(ci1.size+1, nroots) # Consider Koopmans' theorem only
if diag is None:
idx = range(1, nroots)
else:
idx = diag[:ci1.size+1].argsort()[1:nroots] # exclude HF determinant
ci_guess = [ci_guess]
for i in idx:
g = numpy.zeros(civec_size, dtype)
g[i] = 1.0
ci_guess.append(g)
return self.emp2, ci_guess
contract = contract
make_diagonal = make_diagonal
def _dot(self, x1, x2, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return dot(x1, x2, nmo, nocc)
def ao2mo(self, mo_coeff=None):
nmo = self.nmo
nao = self.mo_coeff.shape[0]
nmo_pair = nmo * (nmo+1) // 2
nao_pair = nao * (nao+1) // 2
mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
return ccsd._make_eris_incore(self, mo_coeff)
elif getattr(self._scf, 'with_df', None):
logger.warn(self, 'CISD detected DF being used in the HF object. '
'MO integrals are computed based on the DF 3-index tensors.\n'
'It\'s recommended to use dfccsd.CCSD for the '
'DF-CISD calculations')
return ccsd._make_df_eris_outcore(self, mo_coeff)
else:
return ccsd._make_eris_outcore(self, mo_coeff)
def _add_vvvv(self, c2, eris, out=None, t2sym=None):
return ccsd._add_vvvv(self, None, c2, eris, out, False, t2sym)
def to_fcivec(self, cisdvec, norb=None, nelec=None, frozen=0):
if norb is None: norb = self.nmo
if nelec is None: nelec = self.nocc*2
return to_fcivec(cisdvec, norb, nelec, frozen)
def from_fcivec(self, fcivec, norb=None, nelec=None):
if norb is None: norb = self.nmo
if nelec is None: nelec = self.nocc*2
return from_fcivec(fcivec, norb, nelec)
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
trans_rdm1 = trans_rdm1
as_scanner = as_scanner
def dump_chk(self, ci=None, frozen=None, mo_coeff=None, mo_occ=None):
if not self.chkfile:
return self
if ci is None: ci = self.ci
if frozen is None: frozen = self.frozen
ci_chk = {'e_corr': self.e_corr,
'ci': ci,
'frozen': frozen}
if mo_coeff is not None: ci_chk['mo_coeff'] = mo_coeff
if mo_occ is not None: ci_chk['mo_occ'] = mo_occ
if self._nmo is not None: ci_chk['_nmo'] = self._nmo
if self._nocc is not None: ci_chk['_nocc'] = self._nocc
lib.chkfile.save(self.chkfile, 'cisd', ci_chk)
def amplitudes_to_cisdvec(self, c0, c1, c2):
return amplitudes_to_cisdvec(c0, c1, c2)
def cisdvec_to_amplitudes(self, civec, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return cisdvec_to_amplitudes(civec, nmo, nocc)
def density_fit(self):
raise NotImplementedError
def nuc_grad_method(self):
from pyscf.grad import cisd
return cisd.Gradients(self)
class RCISD(CISD):
pass
from pyscf import scf
scf.hf.RHF.CISD = lib.class_as_method(RCISD)
scf.rohf.ROHF.CISD = None
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import fci
from pyscf import ao2mo
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = 'sto3g'
mol.build()
mf = scf.RHF(mol).run()
myci = CISD(mf)
eris = ccsd._make_eris_outcore(myci, mf.mo_coeff)
ecisd, civec = myci.kernel(eris=eris)
print(ecisd - -0.048878084082066106)
nmo = myci.nmo
nocc = myci.nocc
rdm1 = myci.make_rdm1(civec)
rdm2 = myci.make_rdm2(civec)
h1e = reduce(numpy.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
h2e = ao2mo.kernel(mf._eri, mf.mo_coeff)
h2e = ao2mo.restore(1, h2e, nmo)
e2 = (numpy.einsum('ij,ji', h1e, rdm1) +
numpy.einsum('ijkl,ijkl', h2e, rdm2) * .5)
print(ecisd + mf.e_tot - mol.energy_nuc() - e2) # = 0
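    # consistency check below: tracing rdm2 over one particle index recovers rdm1,
    # i.e. rdm1[q,p] = sum_k rdm2[p,q,k,k] / (nelectron - 1)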
print(abs(rdm1 - numpy.einsum('ijkk->ji', rdm2)/(mol.nelectron-1)).sum())
|
run-with-webcam.py
|
# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import imutils
import cv2
import os
from playsound import playsound
import threading
from datetime import datetime
import mysql.connector
from time import gmtime, strftime
from ftplib import FTP
#DB Config
host = "localhost"
username = "root"
password = ""
database_name = "facemask"
mydb = mysql.connector.connect(
host="{}".format(host),
user="{}".format(username),
passwd="{}".format(password),
database="{}".format(database_name)
)
#FTP Config
ftp_host = ""
ftp_username = ""
ftp_password = ""
with open("ftp-credentials.txt") as file:
for line in file:
if line.strip().split("=")[0] == "host":
ftp_host = line.strip().split("=")[1]
elif line.strip().split("=")[0] == "username":
ftp_username = line.strip().split("=")[1]
elif line.strip().split("=")[0] == "password":
ftp_password = line.strip().split("=")[1]
ftp = FTP(ftp_host)
ftp.login(user=ftp_username,passwd=ftp_password)
def greeting_function():
currentHour = int(datetime.today().strftime('%H'))
basicGreeting = ""
    if 0 <= currentHour < 12:
        basicGreeting = "morning"
    elif 12 <= currentHour < 18:
        basicGreeting = "afternoon"
    else:
        basicGreeting = "evening"
return basicGreeting
def get_Center(x, y, w, h):
x1 = int(w / 2)
y1 = int(h / 2)
cx = x + x1
cy = y + y1
return cx, cy
ROI = 300
offset = 8
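# A face triggers the mask / no-mask handlers only while its bounding-box center
# lies within `offset` pixels of the horizontal ROI line drawn at y = ROI.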
global_current_date = datetime.today().strftime('%Y-%m-%d')
lang = ""
SelectDataCursor = mydb.cursor()
SelectDataCursor.execute("SELECT lang FROM language")
collectedData = SelectDataCursor.fetchall()
for data in collectedData:
    lang = data[0]
without_mask_sound = ""
if lang == "tamil":
without_mask_sound = "sound-effect\\without_mask\\tamil.mp3"
elif lang == "sinhala":
without_mask_sound = "sound-effect\\without_mask\\sinhala.mp3"
elif lang == "english":
without_mask_sound = "sound-effect\\without_mask\\english.mp3"
is_sound_playing = False
def soundPlay(sound_path):
global is_sound_playing
is_sound_playing = True
playsound(sound_path)
is_sound_playing = False
def without_mask_detected(frame):
global is_sound_playing, without_mask_sound
    cv2.imwrite("cache-img.jpg", frame)
    current_time = datetime.today().strftime('%I:%M:%S %p')
    file_name = "{}_{}.jpg".format(global_current_date, current_time.replace(":", "-"))
    with open("cache-img.jpg", 'rb') as img_file:
        ftp.storbinary('STOR ' + "{}".format(file_name), img_file)
sqlCode = "INSERT INTO data (auto_id, _date, _time, image_path) VALUES (%s, %s, %s, %s)"
values = ("", global_current_date,current_time,file_name)
insertCursor = mydb.cursor()
insertCursor.execute(sqlCode,values)
mydb.commit()
if is_sound_playing == False:
soundPlay(without_mask_sound)
def with_mask_detected():
global is_sound_playing
if is_sound_playing == False:
greeting = greeting_function()
if greeting == "morning":
if lang == "tamil":
soundPlay("sound-effect\\with_mask\\tamil_morning.mp3")
elif lang == "sinhala":
soundPlay("sound-effect\\with_mask\\sinhala_morning.mp3")
elif lang == "english":
soundPlay("sound-effect\\with_mask\\english_morning.mp3")
elif greeting == "afternoon":
if lang == "tamil":
soundPlay("sound-effect\\with_mask\\tamil_afternoon.mp3")
elif lang == "sinhala":
soundPlay("sound-effect\\with_mask\\sinhala_afternoon.mp3")
elif lang == "english":
soundPlay("sound-effect\\with_mask\\english_afternoon.mp3")
elif greeting == "evening":
if lang == "tamil":
soundPlay("sound-effect\\with_mask\\tamil_evening.mp3")
elif lang == "sinhala":
soundPlay("sound-effect\\with_mask\\sinhala_evening.mp3")
elif lang == "english":
soundPlay("sound-effect\\with_mask\\english_evening.mp3")
def detect_and_predict_mask(frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
(104.0, 177.0, 123.0))
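    # (104.0, 177.0, 123.0) are the per-channel BGR means that blobFromImage
    # subtracts before the image is fed to the res10 SSD face detector.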
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
#print(detections.shape)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence for face detections
if confidence > 0.3:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
    # only make predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
    # return a 2-tuple of the face locations and their corresponding
    # predictions
return (locs, preds)
# load our serialized face detector model from disk
prototxtPath = r"face-detector\deploy.prototxt"
weightsPath = r"face-detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
maskNet = load_model("mask-detector.model")
# initialize the video stream
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
#vs = cv2.VideoCapture("video_1.mp4")
width_and_height = 850
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
    # to a width of 850 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=width_and_height)
copyFrame = frame.copy()
cv2.line(frame, (0 , ROI), (1200 , ROI), (0,255,255), 4) # Line
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# locations
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(xmin, ymin, xmax, ymax) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
if mask > withoutMask:
label = "Mask"
color = (0, 255, 0)
else:
label = "No Mask"
color = (0, 0, 255)
#label = "Mask" if mask > withoutMask else "No Mask"
#color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
#label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (xmin, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX, 2, color, 2)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
mid_point = get_Center(int(x), int(y), int(w),int(h))
cv2.circle(frame, (mid_point[0], mid_point[1]), 6, color, -1)
if mid_point[1] < (ROI + offset) and mid_point[1] > (ROI - offset):
if label == "Mask":
cv2.line(frame, (0 , ROI), (1200 , ROI), (0, 255, 0), 4)
withMask_threading = threading.Thread(target = with_mask_detected)
withMask_threading.start()
elif label == "No Mask":
cv2.line(frame, (0 , ROI), (1200 , ROI), (0, 0, 255), 4)
withoutMask_threading = threading.Thread(target = without_mask_detected, args=(copyFrame,))
withoutMask_threading.start()
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
ftp.quit()
|
test_dht_node.py
|
import asyncio
import heapq
import multiprocessing as mp
import random
import signal
from itertools import product
from typing import List, Sequence, Tuple
import numpy as np
import pytest
from multiaddr import Multiaddr
import hivemind
from hivemind import get_dht_time
from hivemind.dht.node import DHTID, DHTNode
from hivemind.dht.protocol import DHTProtocol
from hivemind.dht.storage import DictionaryDHTValue
from hivemind.p2p import P2P, PeerID
from hivemind.utils.logging import get_logger
from test_utils.dht_swarms import launch_swarm_in_separate_processes, launch_star_shaped_swarm
logger = get_logger(__name__)
def maddrs_to_peer_ids(maddrs: List[Multiaddr]) -> List[PeerID]:
return list({PeerID.from_base58(maddr["p2p"]) for maddr in maddrs})
def run_protocol_listener(
dhtid: DHTID, maddr_conn: mp.connection.Connection, initial_peers: Sequence[Multiaddr]
) -> None:
loop = asyncio.get_event_loop()
p2p = loop.run_until_complete(P2P.create(initial_peers=initial_peers))
visible_maddrs = loop.run_until_complete(p2p.get_visible_maddrs())
protocol = loop.run_until_complete(
DHTProtocol.create(p2p, dhtid, bucket_size=20, depth_modulo=5, num_replicas=3, wait_timeout=5)
)
logger.info(f"Started peer id={protocol.node_id} visible_maddrs={visible_maddrs}")
for peer_id in maddrs_to_peer_ids(initial_peers):
loop.run_until_complete(protocol.call_ping(peer_id))
maddr_conn.send((p2p.id, visible_maddrs))
async def shutdown():
await p2p.shutdown()
logger.info(f"Finished peer id={protocol.node_id} maddrs={visible_maddrs}")
loop.stop()
loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(shutdown()))
loop.run_forever()
def launch_protocol_listener(
initial_peers: Sequence[Multiaddr] = (),
) -> Tuple[DHTID, mp.Process, PeerID, List[Multiaddr]]:
remote_conn, local_conn = mp.Pipe()
dht_id = DHTID.generate()
process = mp.Process(target=run_protocol_listener, args=(dht_id, remote_conn, initial_peers), daemon=True)
process.start()
peer_id, visible_maddrs = local_conn.recv()
return dht_id, process, peer_id, visible_maddrs
# note: we run network-related tests in a separate process to re-initialize all global states from scratch
# this helps us avoid undesirable gRPC side-effects (e.g. segfaults) when running multiple tests in sequence
@pytest.mark.forked
def test_dht_protocol():
peer1_node_id, peer1_proc, peer1_id, peer1_maddrs = launch_protocol_listener()
peer2_node_id, peer2_proc, peer2_id, _ = launch_protocol_listener(initial_peers=peer1_maddrs)
loop = asyncio.get_event_loop()
for client_mode in [True, False]: # note: order matters, this test assumes that first run uses client mode
peer_id = DHTID.generate()
p2p = loop.run_until_complete(P2P.create(initial_peers=peer1_maddrs))
protocol = loop.run_until_complete(
DHTProtocol.create(
p2p, peer_id, bucket_size=20, depth_modulo=5, wait_timeout=5, num_replicas=3, client_mode=client_mode
)
)
logger.info(f"Self id={protocol.node_id}")
assert loop.run_until_complete(protocol.call_ping(peer1_id)) == peer1_node_id
key, value, expiration = DHTID.generate(), [random.random(), {"ololo": "pyshpysh"}], get_dht_time() + 1e3
store_ok = loop.run_until_complete(
protocol.call_store(peer1_id, [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
)
assert all(store_ok), "DHT rejected a trivial store"
# peer 1 must know about peer 2
(recv_value_bytes, recv_expiration), nodes_found = loop.run_until_complete(
protocol.call_find(peer1_id, [key])
)[key]
recv_value = hivemind.MSGPackSerializer.loads(recv_value_bytes)
(recv_id, recv_peer_id) = next(iter(nodes_found.items()))
assert (
recv_id == peer2_node_id and recv_peer_id == peer2_id
), f"expected id={peer2_node_id}, peer={peer2_id} but got {recv_id}, {recv_peer_id}"
assert recv_value == value and recv_expiration == expiration, (
f"call_find_value expected {value} (expires by {expiration}) "
f"but got {recv_value} (expires by {recv_expiration})"
)
# peer 2 must know about peer 1, but not have a *random* nonexistent value
dummy_key = DHTID.generate()
empty_item, nodes_found_2 = loop.run_until_complete(protocol.call_find(peer2_id, [dummy_key]))[dummy_key]
assert empty_item is None, "Non-existent keys shouldn't have values"
(recv_id, recv_peer_id) = next(iter(nodes_found_2.items()))
assert (
recv_id == peer1_node_id and recv_peer_id == peer1_id
), f"expected id={peer1_node_id}, peer={peer1_id} but got {recv_id}, {recv_peer_id}"
# cause a non-response by querying a nonexistent peer
assert loop.run_until_complete(protocol.call_find(PeerID.from_base58("fakeid"), [key])) is None
# store/get a dictionary with sub-keys
nested_key, subkey1, subkey2 = DHTID.generate(), "foo", "bar"
value1, value2 = [random.random(), {"ololo": "pyshpysh"}], "abacaba"
assert loop.run_until_complete(
protocol.call_store(
peer1_id,
keys=[nested_key],
values=[hivemind.MSGPackSerializer.dumps(value1)],
expiration_time=[expiration],
subkeys=[subkey1],
)
)
assert loop.run_until_complete(
protocol.call_store(
peer1_id,
keys=[nested_key],
values=[hivemind.MSGPackSerializer.dumps(value2)],
expiration_time=[expiration + 5],
subkeys=[subkey2],
)
)
(recv_dict, recv_expiration), nodes_found = loop.run_until_complete(
protocol.call_find(peer1_id, [nested_key])
)[nested_key]
assert isinstance(recv_dict, DictionaryDHTValue)
assert len(recv_dict.data) == 2 and recv_expiration == expiration + 5
assert recv_dict.data[subkey1] == (protocol.serializer.dumps(value1), expiration)
assert recv_dict.data[subkey2] == (protocol.serializer.dumps(value2), expiration + 5)
if not client_mode:
loop.run_until_complete(p2p.shutdown())
peer1_proc.terminate()
peer2_proc.terminate()
@pytest.mark.forked
def test_empty_table():
"""Test RPC methods with empty routing table"""
peer_id, peer_proc, peer_peer_id, peer_maddrs = launch_protocol_listener()
loop = asyncio.get_event_loop()
p2p = loop.run_until_complete(P2P.create(initial_peers=peer_maddrs))
protocol = loop.run_until_complete(
DHTProtocol.create(
p2p, DHTID.generate(), bucket_size=20, depth_modulo=5, wait_timeout=5, num_replicas=3, client_mode=True
)
)
key, value, expiration = DHTID.generate(), [random.random(), {"ololo": "pyshpysh"}], get_dht_time() + 1e3
empty_item, nodes_found = loop.run_until_complete(protocol.call_find(peer_peer_id, [key]))[key]
assert empty_item is None and len(nodes_found) == 0
assert all(
loop.run_until_complete(
protocol.call_store(peer_peer_id, [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
)
), "peer rejected store"
(recv_value_bytes, recv_expiration), nodes_found = loop.run_until_complete(
protocol.call_find(peer_peer_id, [key])
)[key]
recv_value = hivemind.MSGPackSerializer.loads(recv_value_bytes)
assert len(nodes_found) == 0
assert recv_value == value and recv_expiration == expiration
assert loop.run_until_complete(protocol.call_ping(peer_peer_id)) == peer_id
assert loop.run_until_complete(protocol.call_ping(PeerID.from_base58("fakeid"))) is None
peer_proc.terminate()
@pytest.mark.forked
def test_dht_node(
n_peers: int = 20, n_sequential_peers: int = 5, parallel_rpc: int = 10, bucket_size: int = 5, num_replicas: int = 3
):
    # step A: create a swarm of `n_peers` DHT nodes in separate processes
    # (the first `n_sequential_peers` are created sequentially, the rest in parallel)
processes, dht, swarm_maddrs = launch_swarm_in_separate_processes(
n_peers=n_peers, n_sequential_peers=n_sequential_peers, bucket_size=bucket_size, num_replicas=num_replicas
)
    # step B: run one more node in this process
loop = asyncio.get_event_loop()
initial_peers = random.choice(swarm_maddrs)
me = loop.run_until_complete(
DHTNode.create(
initial_peers=initial_peers,
parallel_rpc=parallel_rpc,
bucket_size=bucket_size,
num_replicas=num_replicas,
cache_refresh_before_expiry=False,
)
)
# test 1: find self
nearest = loop.run_until_complete(me.find_nearest_nodes([me.node_id], k_nearest=1))[me.node_id]
assert len(nearest) == 1 and nearest[me.node_id] == me.peer_id
# test 2: find others
for _ in range(10):
ref_peer_id, query_id = random.choice(list(dht.items()))
nearest = loop.run_until_complete(me.find_nearest_nodes([query_id], k_nearest=1))[query_id]
assert len(nearest) == 1
found_node_id, found_peer_id = next(iter(nearest.items()))
assert found_node_id == query_id and found_peer_id == ref_peer_id
# test 3: find neighbors to random nodes
accuracy_numerator = accuracy_denominator = 0 # top-1 nearest neighbor accuracy
jaccard_numerator = jaccard_denominator = 0 # jaccard similarity aka intersection over union
all_node_ids = list(dht.values())
for _ in range(20):
query_id = DHTID.generate()
k_nearest = random.randint(1, 10)
exclude_self = random.random() > 0.5
nearest = loop.run_until_complete(
me.find_nearest_nodes([query_id], k_nearest=k_nearest, exclude_self=exclude_self)
)[query_id]
nearest_nodes = list(nearest) # keys from ordered dict
assert len(nearest_nodes) == k_nearest, "beam search must return exactly k_nearest results"
assert me.node_id not in nearest_nodes or not exclude_self, "if exclude, results shouldn't contain self"
assert np.all(np.diff(query_id.xor_distance(nearest_nodes)) >= 0), "results must be sorted by distance"
ref_nearest = heapq.nsmallest(k_nearest + 1, all_node_ids, key=query_id.xor_distance)
if exclude_self and me.node_id in ref_nearest:
ref_nearest.remove(me.node_id)
if len(ref_nearest) > k_nearest:
ref_nearest.pop()
accuracy_numerator += nearest_nodes[0] == ref_nearest[0]
accuracy_denominator += 1
jaccard_numerator += len(set.intersection(set(nearest_nodes), set(ref_nearest)))
jaccard_denominator += k_nearest
accuracy = accuracy_numerator / accuracy_denominator
logger.debug(f"Top-1 accuracy: {accuracy}") # should be 98-100%
jaccard_index = jaccard_numerator / jaccard_denominator
logger.debug(f"Jaccard index (intersection over union): {jaccard_index}") # should be 95-100%
assert accuracy >= 0.9, f"Top-1 accuracy only {accuracy} ({accuracy_numerator} / {accuracy_denominator})"
    assert jaccard_index >= 0.9, f"Jaccard index only {jaccard_index} ({jaccard_numerator} / {jaccard_denominator})"
# test 4: find all nodes
dummy = DHTID.generate()
nearest = loop.run_until_complete(me.find_nearest_nodes([dummy], k_nearest=len(dht) + 100))[dummy]
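    # the swarm peers plus this process's own node, hence len(dht) + 1 below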
assert len(nearest) == len(dht) + 1
assert len(set.difference(set(nearest.keys()), set(all_node_ids) | {me.node_id})) == 0
# test 5: node without peers
detached_node = loop.run_until_complete(DHTNode.create())
nearest = loop.run_until_complete(detached_node.find_nearest_nodes([dummy]))[dummy]
assert len(nearest) == 1 and nearest[detached_node.node_id] == detached_node.peer_id
nearest = loop.run_until_complete(detached_node.find_nearest_nodes([dummy], exclude_self=True))[dummy]
assert len(nearest) == 0
# test 6: store and get value
true_time = get_dht_time() + 1200
assert loop.run_until_complete(me.store("mykey", ["Value", 10], true_time))
initial_peers = random.choice(swarm_maddrs)
that_guy = loop.run_until_complete(
DHTNode.create(
initial_peers=initial_peers,
parallel_rpc=parallel_rpc,
cache_refresh_before_expiry=False,
cache_locally=False,
)
)
for node in [me, that_guy]:
val, expiration_time = loop.run_until_complete(node.get("mykey"))
assert val == ["Value", 10], "Wrong value"
        assert expiration_time == true_time, "Wrong expiration time"
assert loop.run_until_complete(detached_node.get("mykey")) is None
# test 7: bulk store and bulk get
keys = "foo", "bar", "baz", "zzz"
values = 3, 2, "batman", [1, 2, 3]
store_ok = loop.run_until_complete(me.store_many(keys, values, expiration_time=get_dht_time() + 999))
assert all(store_ok.values()), "failed to store one or more keys"
response = loop.run_until_complete(me.get_many(keys[::-1]))
for key, value in zip(keys, values):
assert key in response and response[key][0] == value
# test 8: store dictionaries as values (with sub-keys)
upper_key, subkey1, subkey2, subkey3 = "ololo", "k1", "k2", "k3"
now = get_dht_time()
assert loop.run_until_complete(me.store(upper_key, subkey=subkey1, value=123, expiration_time=now + 10))
assert loop.run_until_complete(me.store(upper_key, subkey=subkey2, value=456, expiration_time=now + 20))
for node in [that_guy, me]:
value, time = loop.run_until_complete(node.get(upper_key))
assert isinstance(value, dict) and time == now + 20
assert value[subkey1] == (123, now + 10)
assert value[subkey2] == (456, now + 20)
assert len(value) == 2
assert not loop.run_until_complete(me.store(upper_key, subkey=subkey2, value=345, expiration_time=now + 10))
assert loop.run_until_complete(me.store(upper_key, subkey=subkey2, value=567, expiration_time=now + 30))
assert loop.run_until_complete(me.store(upper_key, subkey=subkey3, value=890, expiration_time=now + 50))
loop.run_until_complete(asyncio.sleep(0.1)) # wait for cache to refresh
for node in [that_guy, me]:
value, time = loop.run_until_complete(node.get(upper_key))
assert isinstance(value, dict) and time == now + 50, (value, time)
assert value[subkey1] == (123, now + 10)
assert value[subkey2] == (567, now + 30)
assert value[subkey3] == (890, now + 50)
assert len(value) == 3
for proc in processes:
proc.terminate()
    # The nodes don't own their hivemind.p2p.P2P instances, so we shut them down separately
loop.run_until_complete(asyncio.wait([node.shutdown() for node in [me, detached_node, that_guy]]))
@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_replicas():
num_replicas = random.randint(1, 20)
peers = await launch_star_shaped_swarm(n_peers=20, num_replicas=num_replicas)
you = random.choice(peers)
assert await you.store("key1", "foo", get_dht_time() + 999)
actual_key1_replicas = sum(len(peer.protocol.storage) for peer in peers)
assert num_replicas == actual_key1_replicas
assert await you.store("key2", "bar", get_dht_time() + 999)
total_size = sum(len(peer.protocol.storage) for peer in peers)
actual_key2_replicas = total_size - actual_key1_replicas
assert num_replicas == actual_key2_replicas
assert await you.store("key2", "baz", get_dht_time() + 1000)
assert sum(len(peer.protocol.storage) for peer in peers) == total_size, "total size should not have changed"
@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_caching(T=0.05):
node2 = await DHTNode.create(cache_refresh_before_expiry=5 * T, reuse_get_requests=False)
node1 = await DHTNode.create(
initial_peers=await node2.protocol.p2p.get_visible_maddrs(),
cache_refresh_before_expiry=5 * T,
client_mode=True,
reuse_get_requests=False,
)
await node2.store("k", [123, "value"], expiration_time=hivemind.get_dht_time() + 7 * T)
await node2.store("k2", [654, "value"], expiration_time=hivemind.get_dht_time() + 7 * T)
await node2.store("k3", [654, "value"], expiration_time=hivemind.get_dht_time() + 15 * T)
await node1.get_many(["k", "k2", "k3", "k4"])
assert len(node1.protocol.cache) == 3
assert len(node1.cache_refresh_queue) == 0
await node1.get_many(["k", "k2", "k3", "k4"])
assert len(node1.cache_refresh_queue) == 3
await node2.store("k", [123, "value"], expiration_time=hivemind.get_dht_time() + 12 * T)
await asyncio.sleep(4 * T)
await node1.get("k")
await asyncio.sleep(1 * T)
assert len(node1.protocol.cache) == 3
assert len(node1.cache_refresh_queue) == 2
await asyncio.sleep(3 * T)
assert len(node1.cache_refresh_queue) == 1
await asyncio.sleep(5 * T)
assert len(node1.cache_refresh_queue) == 0
await asyncio.sleep(5 * T)
assert len(node1.cache_refresh_queue) == 0
await node2.store("k", [123, "value"], expiration_time=hivemind.get_dht_time() + 10 * T)
await node1.get("k")
await asyncio.sleep(1 * T)
assert len(node1.cache_refresh_queue) == 0
await node1.get("k")
await asyncio.sleep(1 * T)
assert len(node1.cache_refresh_queue) == 1
await asyncio.sleep(5 * T)
assert len(node1.cache_refresh_queue) == 0
await asyncio.gather(node1.shutdown(), node2.shutdown())
@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_reuse_get():
peers = await launch_star_shaped_swarm(n_peers=10, parallel_rpc=256)
await asyncio.gather(
random.choice(peers).store("k1", 123, hivemind.get_dht_time() + 999),
random.choice(peers).store("k2", 567, hivemind.get_dht_time() + 999),
)
you = random.choice(peers)
futures1 = await you.get_many(["k1", "k2"], return_futures=True)
assert len(you.pending_get_requests[DHTID.generate("k1")]) == 1
assert len(you.pending_get_requests[DHTID.generate("k2")]) == 1
futures2 = await you.get_many(["k2", "k3"], return_futures=True)
assert len(you.pending_get_requests[DHTID.generate("k2")]) == 2
await asyncio.gather(*futures1.values(), *futures2.values())
futures3 = await you.get_many(["k3"], return_futures=True)
assert len(you.pending_get_requests[DHTID.generate("k1")]) == 0
assert len(you.pending_get_requests[DHTID.generate("k2")]) == 0
assert len(you.pending_get_requests[DHTID.generate("k3")]) == 1
assert (await futures1["k1"])[0] == 123
assert await futures1["k2"] == await futures2["k2"] and (await futures1["k2"])[0] == 567
assert await futures2["k3"] == await futures3["k3"] and (await futures3["k3"]) is None
@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_blacklist():
node1, node2, node3, node4 = await launch_star_shaped_swarm(n_peers=4, blacklist_time=999)
assert await node2.store("abc", 123, expiration_time=hivemind.get_dht_time() + 99)
assert len(node2.blacklist.ban_counter) == 0
await asyncio.gather(node3.shutdown(), node4.shutdown())
assert await node2.store("def", 456, expiration_time=hivemind.get_dht_time() + 99)
assert set(node2.blacklist.ban_counter.keys()) == {node3.peer_id, node4.peer_id}
assert await node1.get("abc", latest=True) # force node1 to crawl dht and discover unresponsive peers
assert node3.peer_id in node1.blacklist
assert await node1.get("abc", latest=True) # force node1 to crawl dht and discover unresponsive peers
assert node2.peer_id not in node1.blacklist
await asyncio.gather(node1.shutdown(), node2.shutdown())
@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_edge_cases():
peers = await launch_star_shaped_swarm(n_peers=4, parallel_rpc=4)
subkeys = [0, "", False, True, "abyrvalg", 4555]
keys = subkeys + [()]
values = subkeys + [[]]
for key, subkey, value in product(keys, subkeys, values):
        await random.choice(peers).store(
            key=key, subkey=subkey, value=value, expiration_time=hivemind.get_dht_time() + 999
        )
stored = await random.choice(peers).get(key=key, latest=True)
assert stored is not None
assert subkey in stored.value
assert stored.value[subkey].value == value
await asyncio.wait([node.shutdown() for node in peers])
|
hfbs_fpga.py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
import os
import time
import datetime
import argparse
from multiprocessing import Process, Queue
# local imports
import utils
#flags
RUN_FPGA = False
VERBOSE = True
FIXEDP = True
LOOP_OUTSIDE = False
NO_GARBAGE_ADDED = True
SAVE_FIG = False
DISPLAY_FIG = True
TWO_FLOW = False
# settings
EPS = np.finfo(float).eps
np.set_printoptions(threshold=np.nan)
# constants
BILATERAL_SIGMA_LUMA = 32
BILATERAL_SIGMA_SPATIAL = 32
LOSS_SMOOTH_MULT = 8
A_VERT_DIAG_MIN = 1e-3
NUM_PCG_ITERS = 100
NUM_NEIGHBORS = 6
reference = '../data/depth_superres/reference.png'
target = '../data/depth_superres/target.png'
confidence = '../data/depth_superres/confidence.png'
# FPGA-specific constants, yours may be different
c2h_str = "/dev/xdma/card0/c2h0"
h2c_str = "/dev/xdma/card0/h2c0"
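# The XDMA driver exposes DMA channels as character devices: c2h ("card to host")
# is the read path from the FPGA and h2c ("host to card") is the write path.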
def read_from_fpga(fpga_read_channel, num_bytes, my_queue):
if not LOOP_OUTSIDE:
        print("reading ((num_bytes-8)//5)+8", ((num_bytes-8)//5) + 8)
        data_readback = os.read(fpga_read_channel, ((num_bytes-8)//5) + 8)
else:
if NO_GARBAGE_ADDED:
            print("reading 3*(num_bytes//10)", 3*(num_bytes//10))
            data_readback = os.read(fpga_read_channel, 3*(num_bytes//10))
else:
data_readback = os.read(fpga_read_channel, num_bytes)
#data_readback = os.read(fpga_read_channel, 8)
    print("\tdone reading, putting")
my_queue.put(data_readback)
return
def invoke_fpga(im):
result = ''
c2h_fd = 0
h2c_fd = 0
new_im = ''.join([utils.to_bytestr(num) for num in im])
try:
# open up read and write channels
h2c_fd = os.open(h2c_str, os.O_RDWR)
c2h_fd = os.open(c2h_str, os.O_RDWR)
bytes_left = im.size
result_queue = Queue()
# processes to handle r/w channels
read_process = Process(target=read_from_fpga, args=(c2h_fd,len(new_im), result_queue))
# write_process = Process(target=write_to_fpga, args=(h2c_fd,buffer(im, im.size - bytes_left)))
# open up rx
read_process.start()
        print("started read")
time.sleep(.5) # wait for the read to start TODO do this in the correct way (maybe wait until poll != 0 )
# open up tx, tx should trigger rx
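        # NOTE: `buffer` below is Python 2 only; under Python 3 the bytes (or a
        # memoryview of them) would be passed to os.write directly.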
        print("writing")
if FIXEDP:
bytes_written = os.write(h2c_fd, buffer(new_im, 0))
else:
bytes_written = os.write(h2c_fd, buffer(im, 0))
        print("bytes_written:", bytes_written)
# wait for done
proc_out = result_queue.get()
        print("got proc out")
read_process.join()
        print("read process joined")
result = proc_out
finally:
os.close(c2h_fd)
os.close(h2c_fd)
if RUN_FPGA:
if FIXEDP:
new_result = [k for k in [result[i : i + 8] for i in range(0, len(result), 8)]]
result = [utils.from_bytestr(k) for k in new_result]
else:
result = np.frombuffer(result, dtype=np.float64).reshape(im.shape)
return np.around(np.array(result),decimals=32) # round for extreme vals
def prepare_flow(reference, flow_tuple, confidence):
reference_image = np.array(plt.imread(reference, format='png'), dtype=np.float32)*256
flow_a = np.array(plt.imread(flow_tuple[0], format='png'), dtype=np.float32)*65536
if TWO_FLOW:
flow_b = np.array(plt.imread(flow_tuple[1], format='png'), dtype=np.float32)*65536
flow = np.stack((flow_a,flow_b),axis=-1)
else:
flow = flow_a
flow = np.subtract(flow, 2**15)
flow = np.divide(flow, 256)
weight = np.array(plt.imread(confidence, format='png'), dtype=np.float32)*256*256
weight = np.divide(weight, 65536)
if VERBOSE:
print(">>> preparing flow data")
sz = [reference_image.shape[0], reference_image.shape[1]]
I_x = np.tile(np.floor(np.divide(np.arange(sz[1]), BILATERAL_SIGMA_SPATIAL)), (sz[0],1))
I_y = np.tile( np.floor(np.divide(np.arange(sz[0]), BILATERAL_SIGMA_SPATIAL)).reshape(1,-1).T, (1,sz[1]) )
I_luma = np.floor_divide(utils.rgb2gray(reference_image), float(BILATERAL_SIGMA_LUMA))
X = np.concatenate((I_x[:,:,None],I_y[:,:,None],I_luma[:,:,None]),axis=2).reshape((-1,3),order='F')
W0 = np.ravel(weight.T)
if TWO_FLOW:
X0 = np.reshape(flow,[-1,2],order='F')
else:
X0 = np.reshape(flow,[-1,1],order='F')
return X, W0, X0, flow_a.shape
def bistochastize(splat_mat, grid_size, diffuse3_mat, W0, Xshape):
splat_sum = np.reshape( splat_mat * np.ones(Xshape), grid_size,order='F').astype('float32')
splat_norm = np.ones(splat_sum.shape).astype('float32')
for i_norm in range(20):
        diffuse_splat_norm = np.reshape(diffuse3_mat * np.reshape(splat_norm, (-1, 1), order='F'), splat_norm.shape, order='F')
blurred_splat_norm = 2 * splat_norm + diffuse_splat_norm
denom = np.maximum(EPS, blurred_splat_norm)
splat_norm = np.sqrt(splat_norm * (splat_sum / denom))
A_diag = np.reshape((splat_mat * W0), grid_size,order='F') + LOSS_SMOOTH_MULT * (splat_sum - 2 * np.square(splat_norm))
return splat_norm, A_diag
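# The fixed-point iteration above rescales the grid so that the blur operator
# (splat -> diffuse -> slice) approximately preserves constant signals, i.e. it is
# made (roughly) bistochastic; A_diag is then the diagonal of the resulting
# data + smoothness quadratic term used by the Jacobi solver below.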
def make_neighbors(grid_size):
ii,jj,kk = np.mgrid[0:grid_size[0],0:grid_size[1],0:grid_size[2]]
idxs0 = np.array([], dtype=np.int64).reshape(0,2)
idxs = np.array([], dtype=np.int64).reshape(0,2)
if VERBOSE:
print(">>> neighbors")
for diff in [-1,1]:
ii_ = ii + diff
jj_ = jj + diff
kk_ = kk + diff
# first ii
idx = np.array([ii[(0 <= ii_) & (ii_ < grid_size[0])],
jj[(0 <= ii_) & (ii_ < grid_size[0])],
kk[(0 <= ii_) & (ii_ < grid_size[0])]]).T
idx2ravel = np.array([np.ravel_multi_index(item,dims=grid_size,order='F') for item in idx])
idx_ = np.array([ii_[(0 <= ii_) & (ii_ < grid_size[0])],
jj[(0 <= ii_) & (ii_ < grid_size[0])],
kk[(0 <= ii_) & (ii_ < grid_size[0])]]).T
idx_2ravel = np.array([np.ravel_multi_index(item,dims=grid_size,order='F') for item in idx_])
idxs = np.vstack( [idxs, np.array([idx2ravel,idx_2ravel]).T ] )
# then jj
idx = np.array([ii[(0 <= jj_) & (jj_ < grid_size[1])],
jj[(0 <= jj_) & (jj_ < grid_size[1])],
kk[(0 <= jj_) & (jj_ < grid_size[1])]]).T
idx2ravel = np.array([np.ravel_multi_index(item,dims=grid_size,order='F') for item in idx])
idx_ = np.array([ii[(0 <= jj_) & (jj_ < grid_size[1])],
jj_[(0 <= jj_) & (jj_ < grid_size[1])],
kk[(0 <= jj_) & (jj_ < grid_size[1])]]).T
idx_2ravel = np.array([np.ravel_multi_index(item,dims=grid_size,order='F') for item in idx_])
idxs = np.vstack( [idxs, np.array([idx2ravel,idx_2ravel]).T ] )
# then kk
idx = np.array([ii[(0 <= kk_) & (kk_ < grid_size[2])],
jj[(0 <= kk_) & (kk_ < grid_size[2])],
kk[(0 <= kk_) & (kk_ < grid_size[2])]]).T
idx2ravel = np.array([np.ravel_multi_index(item,dims=grid_size,order='F') for item in idx])
idx_ = np.array([ii[(0 <= kk_) & (kk_ < grid_size[2])],
jj[(0 <= kk_) & (kk_ < grid_size[2])],
kk_[(0 <= kk_) & (kk_ < grid_size[2])]]).T
idx_2ravel = np.array([np.ravel_multi_index(item,dims=grid_size,order='F') for item in idx_])
idxs = np.vstack( [idxs, np.array([idx2ravel,idx_2ravel]).T ] )
return idxs
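# make_neighbors returns the index pairs of the 6-connected neighbors (+/-1 along
# the x, y and luma axes) of every bilateral-grid vertex, flattened in Fortran
# order; these pairs define the sparsity pattern of the diffusion matrix.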
def dense_solve_jacobi(X, W0, X0):
if VERBOSE:
print(">>> starting bilateral grid optimization")
grid_size = (np.amax(X,axis=0) + np.ones(3)).astype('int64')
    print(grid_size)
basis = np.insert(np.cumprod(grid_size)[0:-1],0,1)
grid_num_vertices = np.prod(grid_size)
grid_splat_indices = np.dot(X,basis)
if VERBOSE:
print(">>> building splat mat")
# this is where the info to build the sparse matrix is:
# http://stackoverflow.com/questions/7760937/issue-converting-matlab-sparse-code-to-numpy-scipy-with-csc-matrix
splat_ones = np.ones(grid_splat_indices.shape[0])
ij = ( grid_splat_indices-1, np.arange(0, grid_splat_indices.shape[0]) )
splat_mat_shape = ( np.prod(grid_size), X.shape[0] )
splat_mat = csr_matrix( (splat_ones, ij), shape=splat_mat_shape )
start_new_neighbors = time.time()
idxs = make_neighbors(grid_size)
if VERBOSE:
print("time neighbors: ", time.time() - start_new_neighbors)
d_shape = np.prod(grid_size)
diffuse_s = np.tile(1,idxs.shape[0])
diffuse3_mat = csr_matrix( (diffuse_s, (idxs[:,0],idxs[:,1])), shape=(d_shape,d_shape) )
if VERBOSE:
print(">>> bistochastization")
start_bistoch = time.time()
splat_norm, A_diag = bistochastize(splat_mat, grid_size, diffuse3_mat, W0, X.shape[0])
if VERBOSE:
print("time bistoch: ", time.time() - start_bistoch)
if TWO_FLOW:
XW0 = np.column_stack((np.multiply(W0,X0[:, 0]) , np.multiply(W0,X0[:, 1]))).astype('float32')
b_mat = np.reshape((splat_mat * XW0), np.hstack([grid_size,2]),order='F').astype('float32')
else:
XW0 = (W0*X0.T).T.astype('float32')
b_mat = np.reshape((splat_mat * XW0), grid_size,order='F').astype('float32')
#% Construct A, b, and c, which define our optimization problem
    c = 0.5 * np.sum(XW0 * X0, axis=1)
# initial solution
inv_a = 1/np.maximum(A_VERT_DIAG_MIN, A_diag)
#inv_a = np.divide(1,np.maximum(A_VERT_DIAG_MIN, A_diag)).astype('float32')
final_result_cpu = []
final_result_fpga = []
run_fpga_time = 0
# for each channel in the flow
if VERBOSE:
print("starting flow!")
if TWO_FLOW:
for channel in range(b_mat.shape[-1]):
if VERBOSE:
print("channel", channel)
b_c = b_mat[:,:,:,channel].astype('float32')
Y_c = np.multiply(b_c, inv_a)
Y_c_cpu = np.multiply(b_c, inv_a)
Y_c_fpga = np.multiply(b_c, inv_a).astype('float32')
cpu_time_sequential = 0
cpu_time_vector = 0
cpu_time_sequential_compute_only = 0
fpga_time = 0
start_vector = time.time()
if LOOP_OUTSIDE:
for iter in range(NUM_PCG_ITERS):
if VERBOSE:
print("iteration", iter)
start_vector = time.time()
Ysn = np.multiply(Y_c, splat_norm) # flow 1
                    Ysn_reshaped = np.reshape(Ysn, (-1, 1), order='F')
Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ))
#Y_c_cpu = np.multiply((b_c - (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ) )) , inv_a)
Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a)
Y_c = Y_c_cpu
total_time_vector = time.time() - start_vector #end timing
Y_c_cpu = Y_c
if VERBOSE:
print("\t CPU vector time: \t\t\t", total_time_vector)
if RUN_FPGA:
np_fpga_input = np.empty(np.prod(grid_size)*5)
fpga_elapsed = 0
npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
for x in npit:
# order: Y, 1/a, bc, splatnorm
#print npit.index, "\t", npit.multi_index
np_fpga_input[5*npit.index + 0] = Y_c_fpga[npit.multi_index]
np_fpga_input[5*npit.index + 1] = splat_norm[npit.multi_index]
np_fpga_input[5*npit.index + 2] = b_c[npit.multi_index]
np_fpga_input[5*npit.index + 3] = inv_a[npit.multi_index]
np_fpga_input[5*npit.index + 4] = Ysn[npit.multi_index]
run_fpga_time = time.time()
proc_output = invoke_fpga(np_fpga_input)
fpga_input = np.array(np_fpga_input).astype('float32')
processed_output = invoke_fpga(fpga_input)
if NO_GARBAGE_ADDED:
Y_c_fpga = processed_output[0::3].reshape(grid_size)
else:
Y_c_fpga = processed_output[0::10].reshape(grid_size)
else: # we are looping inside the FPGA, so we send the data once
start_vector = time.time()
for iter in range(NUM_PCG_ITERS):
Ysn = np.multiply(Y_c_cpu, splat_norm) # flow 1
Ysn_reshaped = np.reshape(Ysn, -1, order='F')
Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 )).astype('float32')
Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a).astype('float32')
total_time_vector = time.time() - start_vector #end timing
cpu_time_vector += total_time_vector
if VERBOSE:
print("\t CPU vector time: \t\t\t", total_time_vector)
np_fpga_input = np.empty(np.prod(grid_size)*5+1)
fpga_elapsed = 0
npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
np_fpga_input[0] = NUM_PCG_ITERS
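# element 0 carries the iteration count for the FPGA-side loop; the per-vertex
# 5-float records follow, hence the "+1" offsets in the packing below.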
for x in npit:
# packing order per grid vertex: Y, splat_norm, b_c, inv_a (1/a), Ysn
#print npit.index, "\t", npit.multi_index
np_fpga_input[5*npit.index + 0+1] = Y_c_fpga[npit.multi_index]
np_fpga_input[5*npit.index + 1+1] = splat_norm[npit.multi_index]
np_fpga_input[5*npit.index + 2+1] = b_c[npit.multi_index]
np_fpga_input[5*npit.index + 3+1] = inv_a[npit.multi_index]
np_fpga_input[5*npit.index + 4+1] = Ysn[npit.multi_index]
if RUN_FPGA:
run_fpga_time = time.time()
proc_output_2 = invoke_fpga(np_fpga_input)
cycles = proc_output_2[0]
print "cycles: ", cycles
proc_output = proc_output_2[1:]
Y_c_fpga = proc_output.reshape(grid_size,order='F')
fpga_runtime = time.time() - run_fpga_time
fpga_time +=fpga_runtime
final_result_cpu.append(Y_c_cpu)
final_result_fpga.append(Y_c_fpga)
if VERBOSE:
print("Channel",channel,"completed, total time:")
print("\t CPU vector: \t\t\t", cpu_time_vector)
print("\t FPGA: \t\t\t", fpga_time)
final_solution_cpu = np.stack((final_result_cpu[0], final_result_cpu[1]),axis=3)
final_solution_fpga = np.stack((final_result_fpga[0], final_result_fpga[1]),axis=3)
else:
b_c = b_mat[:,:,:].astype('float32')
Y_c = np.multiply(b_c, inv_a)
Y_c_cpu = np.multiply(b_c, inv_a)
Y_c_fpga = np.multiply(b_c, inv_a).astype('float32')
Ysn = np.multiply(Y_c, splat_norm)
cpu_time_sequential = 0
cpu_time_vector = 0
cpu_time_sequential_compute_only = 0
fpga_time = 0
start_vector = time.time()
if LOOP_OUTSIDE:
for iter in range(NUM_PCG_ITERS):
if VERBOSE:
print("iteration", iter)
start_vector = time.time()
Ysn = np.multiply(Y_c, splat_norm) # flow 1
Ysn_reshaped = np.reshape(Ysn, -1, order='F')
Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ))
#Y_c_cpu = np.multiply((b_c - (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ) )) , inv_a)
Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a)
Y_c = Y_c_cpu
total_time_vector = time.time() - start_vector #end timing
Y_c_cpu = Y_c
if VERBOSE:
print("\t CPU vector time: \t\t\t", total_time_vector)
if RUN_FPGA:
np_fpga_input = np.empty(np.prod(grid_size)*5)
fpga_elapsed = 0
npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
for x in npit:
# packing order per grid vertex: Y, splat_norm, b_c, inv_a (1/a), Ysn
#print npit.index, "\t", npit.multi_index
np_fpga_input[5*npit.index + 0] = Y_c_fpga[npit.multi_index]
np_fpga_input[5*npit.index + 1] = splat_norm[npit.multi_index]
np_fpga_input[5*npit.index + 2] = b_c[npit.multi_index]
np_fpga_input[5*npit.index + 3] = inv_a[npit.multi_index]
np_fpga_input[5*npit.index + 4] = Ysn[npit.multi_index]
run_fpga_time = time.time()
proc_output = invoke_fpga(np_fpga_input)
fpga_input = np.array(np_fpga_input).astype('float32')
processed_output = invoke_fpga(fpga_input)
if NO_GARBAGE_ADDED:
Y_c_fpga = processed_output[0::3].reshape(grid_size)
else:
Y_c_fpga = processed_output[0::10].reshape(grid_size)
else: # we are looping inside the FPGA, so we send the data once
start_vector = time.time()
# for iter in range(NUM_PCG_ITERS):
# Ysn = np.multiply(Y_c_cpu, splat_norm) # flow 1
# Ysn_reshaped = np.reshape(Ysn,-1,1)
# Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
# temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 )).astype('float32')
# Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a).astype('float32')
total_time_vector = time.time() - start_vector #end timing
cpu_time_vector += total_time_vector
if VERBOSE:
print("\t CPU vector time: \t\t\t", total_time_vector)
np_fpga_input = np.empty(np.prod(grid_size)*5+1)
fpga_elapsed = 0
npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
np_fpga_input[0] = NUM_PCG_ITERS
for x in npit:
# packing order per grid vertex: Y, splat_norm, b_c, inv_a (1/a), Ysn
#print npit.index, "\t", npit.multi_index
np_fpga_input[5*npit.index + 0+1] = Y_c_fpga[npit.multi_index]
np_fpga_input[5*npit.index + 1+1] = splat_norm[npit.multi_index]
np_fpga_input[5*npit.index + 2+1] = b_c[npit.multi_index]
np_fpga_input[5*npit.index + 3+1] = inv_a[npit.multi_index]
np_fpga_input[5*npit.index + 4+1] = Ysn[npit.multi_index]
print(np_fpga_input.size)
if RUN_FPGA:
run_fpga_time = time.time()
proc_output_2 = invoke_fpga(np_fpga_input)
cycles = proc_output_2[0]
print "cycles: ", cycles
proc_output = proc_output_2[1:]
else:
proc_output = invoke_fpga(np_fpga_input)
# pdb.set_trace()
Y_c_fpga = proc_output.reshape(grid_size,order='F')
fpga_runtime = time.time() - run_fpga_time
fpga_time +=fpga_runtime
final_result_cpu.append(Y_c_cpu)
final_result_fpga.append(Y_c_fpga)
if VERBOSE:
print("completed, total time:")
print("\t CPU vector: \t\t\t", cpu_time_vector)
print("\t FPGA: \t\t\t", fpga_time)
final_solution_cpu = final_result_cpu
final_solution_fpga = final_result_fpga
if TWO_FLOW:
sliced_solution_cpu = (splat_mat.T) * (final_solution_cpu.reshape(grid_num_vertices,-1,order='F'))
sliced_solution_fpga = (splat_mat.T) * (final_solution_fpga.reshape(grid_num_vertices,-1,order='F'))
else:
sliced_solution_cpu = (splat_mat.T) * (final_solution_cpu[0].reshape(grid_num_vertices,-1,order='F'))
sliced_solution_fpga = (splat_mat.T) * (final_solution_fpga[0].reshape(grid_num_vertices,-1,order='F'))
return sliced_solution_cpu, sliced_solution_fpga # final result is an array with improved flows in both directions
def make_parser():
parser = argparse.ArgumentParser(description='Run the bilateral solver on some depth results.')
parser.add_argument('--fpga', '-f', action='store_true', default=False)
parser.add_argument('--quiet', '-q', action='store_true', default=False)
parser.add_argument('--small', '-s', action='store_true')
parser.add_argument('--fixedpoint', '-fp', action='store_true')
parser.add_argument('--loopoutside', '-lo', action='store_true')
parser.add_argument('--debug', '-d', action='store_true')
parser.add_argument('--nogarb', '-ng', action='store_true', default=False)
parser.add_argument('--savefig', '-sf', action='store_true')
parser.add_argument('--display', '-df', action='store_true')
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# loop over test images
# for imdir in [x[0] for x in os.walk('img/')][1:]:
# reference = imdir+'/reference.png'
# target = imdir+'/input.png'
# confidence = imdir+'/weight.png'
#imdir = 'img/Adirondack'
#reference = imdir+'/reference.png'
#target = imdir+'/input.png'
#confidence = imdir+'/weight.png'
# default input image paths
target = 'img/input0.png'
reference = 'img/reference.png'
confidence = 'img/weight.png'
input_X, input_W0, input_X0, flow_shape = prepare_flow(reference, (target,), confidence)
res_cpu, res_fpga = dense_solve_jacobi(input_X, input_W0, input_X0)
res_cpu = res_cpu.reshape(flow_shape[0], flow_shape[1], -1,order='F')
res_fpga = res_fpga.reshape(flow_shape[0], flow_shape[1], -1,order='F')
flow_a = np.array(plt.imread(target, format='png'), dtype=np.float32)
# flow_b = np.array(plt.imread(flow_b_img, format='png'), dtype=np.float32)
plt.subplot(3,2,1)
plt.imshow(flow_a)
plt.title('flow a input')
plt.colorbar()
# plt.subplot(3,2,2)
# plt.imshow(flow_b)
# plt.title('flow b input')
# plt.colorbar()
plt.subplot(3,2,3)
plt.imshow(res_cpu[:,:,0])
plt.title('CPU flow a output')
plt.colorbar()
# plt.subplot(3,2,4)
# plt.imshow(res_cpu[:,:,1])
# plt.title('CPU flow b output')
# plt.colorbar()
plt.subplot(3,2,5)
plt.imshow(res_fpga[:,:,0])
plt.title('FPGA flow a output')
plt.colorbar()
# plt.subplot(3,2,6)
# plt.imshow(res_fpga[:,:,1])
# plt.title('FPGA flow b output')
# plt.colorbar()
if SAVE_FIG:
timestr = time.strftime("%Y%m%d-%H%M%S")
plt.savefig('myfig'+timestr)
if DISPLAY_FIG:
plt.show()
if __name__ == "__main__":
main()
|
test_store.py
|
# Unit test suite for the RedisStore class.
# This test suite now runs in its own docker container. To build the image, run
# docker build -f Dockerfile-test -t abaco/testsuite .
# from within the tests directory.
#
# To run the tests execute, first start the development stack using:
# 1. export abaco_path=$(pwd)
# 2. docker-compose -f docker-compose-local-db.yml up -d (from within the root directory)
# 3. docker-compose -f docker-compose-local.yml up -d (from within the root directory)
# Then, also from the root directory, execute:
# docker run -e base_url=http://172.17.0.1:8000 -v $(pwd)/local-dev.conf:/etc/service.conf --entrypoint=py.test -it --rm abaco/testsuite:dev /tests/test_store.py
from datetime import datetime
import pytest
import os
import sys
import threading
import time
import timeit
sys.path.append(os.path.split(os.getcwd())[0])
sys.path.append('/actors')
from config import Config
from store import MongoStore
store = 'mongo'
# this is the number of iterations executed in each thread, per test.
n = 500
@pytest.fixture(scope='session')
def st():
ms = MongoStore(Config.get('store', 'mongo_host'), Config.getint('store', 'mongo_port'), db='11')
# we want to recreate the index each time so we start off trying to drop it, but the first time we run
# after the db is instantiated the index won't exist.
try:
ms._db.drop_index('exp_1')
except Exception:
pass
ms._db.create_index('exp', expireAfterSeconds=1)
return ms
def test_set_key(st):
st['test_val'] = 'val'
assert st.get('test_val')['test_val'] == 'val'
def test_set_with_expiry(st):
st.set_with_expiry(['test_exp', 'field'], 'val')
assert st.get('test_exp')['field'] == 'val'
# Mongo expiry is checked every 60 seconds so results will fluctuate slightly due to timing.
# We'll test at the end of the suite to make sure the key is removed.
def _thread(st, n):
for i in range(n):
st.update('test', 'k2', f'w{i}')
def test_update(st):
st['test'] = {'k': 'v', 'k2': 'v2'}
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
st.update('test', 'k', f'v{i}')
t.join()
assert st['test'] == {'k': f'v{n-1}', 'k2': f'w{n-1}'}
def test_pop_field(st):
st['test'] = {'k': 'v', 'k2': 'v2', 'key': 'val'}
# this is the naive functionality we want; of course, this is not thread safe:
cur = st['test']
val = cur.pop('key')
st['test'] = cur
assert val == 'val'
assert st['test'] == {'k': 'v', 'k2': 'v2'}
# here's the non-threaded test:
st['test'] = {'k': 'v', 'k2': 'v2', 'key': 'val'}
assert st['test']['key'] == 'val'
val = st.pop_field(['test', 'key'])
assert val == 'val'
assert not isinstance(st['test'], str)
# and finally, a threaded test:
st['test'] = {'k': 'v', 'k2': 'v2'}
for i in range(n):
st.update('test', f'key{i}', f'v{i}')
st['test']['key0'] = 'v0'
assert st['test']['key0'] == 'v0'
assert st['test'][f'key{n-1}'] == f'v{n-1}'
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
val = st.pop_field(['test', f'key{i}'])
assert val == f'v{i}'
t.join()
assert st['test'] == {'k': 'v', 'k2': f'w{n-1}'}
def test_update_subfield(st):
st['test'] = {'k': {'sub': 'v'}, 'k2': 'v2'}
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
st['test', 'k', 'sub'] = f'v{i}'
t.join()
assert st['test'] == {'k': {'sub': f'v{n-1}'}, 'k2': f'w{n-1}'}
def test_getset(st):
st['test'] = {'k': 'v', 'k2': 'v2'}
st.update('k', 'k', 'v0')
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
v = st.getset(['k'], f'v{i}')
if i == 0:
assert v == 'v0'
else:
assert v == f'v{i-1}'
t.join()
assert st['test'] == {'k': 'v', 'k2': f'w{n-1}'}
assert st['k']['k'] == f'v{n-1}'
def test_within_transaction(st):
# mongo store does not support within_transaction
if not store == 'redis':
return
def _th():
"""A separate thread that is going to compete with the main thread to make updates on the key."""
assert st['k'] == 'v'
time.sleep(1)
# try to update the value of 'k'; this should take a while since the other thread has a lock
start = timeit.default_timer()
res = st.full_update(
{'_id': 'k'},
{'$set': {'k':'v2'}})
stop = timeit.default_timer()
assert st['k'] == 'v2'
tot = stop - start
assert tot > 2.0
def _transaction(val):
"""Represents business logic that should be wrapped in a transaction."""
# make sure we are passed the value
assert val == 'v'
# also, get the key and assert the original value
assert st['k'] == 'v'
# now sleep some time
time.sleep(3)
# now, update the key:
st['k'] = 'foo'
assert st['k'] == 'foo'
st['k'] = 'v'
# first start a new thread that will sleep for 1 second before trying to change the value
t = threading.Thread(target=_th)
t.start()
# now, start a transaction in the main thread:
st.within_transaction(_transaction, 'k')
def test_redis_one_list_pop(st):
# mongo store does not support list mutations
if not store == 'redis':
return
def _th():
""" A separate thread that will compete with the main thread to make mutations to the two lists."""
for i in range(n):
st.pop_fromlist('l')
st['l'] = ['a'] * 5000
t = threading.Thread(target=_th)
t.start()
# now, in the main thread, pop the same number of items from the list
for i in range(n):
st.pop_fromlist('l')
# wait for second thread to complete
t.join()
# verify that the list shrank by 2*n items in total
assert len(st['l']) == 5000 - 2*n
def test_redis_one_list_append(st):
# mongo store does not support list mutations
if not store == 'redis':
return
def _th():
""" A separate thread that will compete with the main thread to make mutations to the two lists."""
for i in range(n):
st.append_tolist('l', 't2')
st['l'] = []
t = threading.Thread(target=_th)
t.start()
# now, in the main thread, append the same number of items to the list
for i in range(n):
st.append_tolist('l', 't1')
# wait for second thread to complete
t.join()
# verify that the list grew by 2*n items in total
assert len(st['l']) == 2*n
def test_redis_one_list_pop_append(st):
# mongo store does not support list mutations
if not store == 'redis':
return
def _th():
""" A separate thread that will compete with the main thread to make mutations to the two lists."""
for i in range(n):
st.append_tolist('l', 't2_{}'.format(i))
st.pop_fromlist('l')
st['l'] = ['a', 'b', 'c']
t = threading.Thread(target=_th)
t.start()
# now, in the main thread, perform the same append/pop processing with different values
for i in range(n):
st.append_tolist('l', 't1_{}'.format(i))
st.pop_fromlist('l')
# wait for second thread to complete
t.join()
# verify that the list still has three items; we cannot guarantee which specific items remain
assert len(st['l']) == 3
def test_redis_two_lists(st):
# mongo store does not support list mutations
if not store == 'redis':
return
def _th():
""" A separate thread that will compete with the main thread to make mutations to the two lists."""
# first, get the first free element
for i in range(n):
# first, get the first free item and move it to the locked list
first_free = st.pop_fromlist('free', 0)
st.append_tolist('locked', first_free)
# now, get first locked and move to free list
first_locked = st.pop_fromlist('locked', 0)
st.append_tolist('free', first_locked)
st['free'] = ['a', 'b', 'c']
st['locked'] = ['1', '2', '3']
t = threading.Thread(target=_th)
t.start()
# now, in the main thread, perform the same processing but in reverse
for i in range(n):
# first, get the first locked item and move it to the free list
first_locked = st.pop_fromlist('locked', 0)
st.append_tolist('free', first_locked)
# now, get first free and move to locked list
first_free = st.pop_fromlist('free', 0)
st.append_tolist('locked', first_free)
# wait for second thread to complete
t.join()
# verify that free and locked lists each have three items; we cannot guarantee which items end up in each list
assert len(st['free']) == 3
assert len(st['locked']) == 3
def test_set_with_expiry2(st):
# Mongo expiry is checked every 60 seconds so results will fluctuate slightly due to timing.
# We'll test at the end of the suite to make sure the key is removed.
tot = 0
while tot < 5:
try:
st['test_exp']
except KeyError:
return
tot += 1
time.sleep(2)
|
processApproach.py
|
import random
import time
import sys
from multiprocessing import Process, Queue
random.seed()
def genList (size):
randomList = []
#initialize random list with values between 0 and 10
for i in range(size):
randomList.append(random.randint(0,10))
return randomList
#return the sum of all elements in the list
#This is the same as "return sum(inList)" but in long form for readability and emphasis
def sumList(inList):
finalSum = 0
#iterate over all values in the list, and calculate the cumulative sum
for value in inList:
finalSum = finalSum + value
return finalSum
def doWork(N,q):
#create a random list of N integers
myList = genList (N)
finalSum = sumList(myList)
#put the result in the Queue to return it to the calling process
q.put(finalSum)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1].isdigit():
N = int(sys.argv[1])
#mark the start time
startTime = time.time()
#create a Queue to share results
q = Queue()
#create 4 sub-processes to do the work (N//4 keeps the size an integer for range())
p1 = Process(target=doWork, args=(N//4, q))
p1.start()
p2 = Process(target=doWork, args=(N//4, q))
p2.start()
p3 = Process(target=doWork, args=(N//4, q))
p3.start()
p4 = Process(target=doWork, args=(N//4, q))
p4.start()
results = []
#grab 4 values from the queue, one for each process
for i in range(4):
#set block=True to block until we get a result
results.append(q.get(True))
#sum the partial results to get the final result
finalSum = sumList(results)
p1.join()
p2.join()
p3.join()
p4.join()
#mark the end time
endTime = time.time()
#calculate the total time it took to complete the work
workTime = endTime - startTime
#print results
print "The job took " + str(workTime) + " seconds to complete"
print "The final sum was: " + str(finalSum)
else:
exit(-1)
|
helper.py
|
# import sys
# import os
import cv2
import numpy
# from datetime import datetime, timedelta
# from threading import Thread
# import time
# import shutil
import socket
# from pympler import asizeof
# from psutil import virtual_memory
# import gc
# import psutil
#
# ##### GLOBAL VARIABLES
#
_is_setup = False
_annotate_line_colour = None
_annotate_grid_colour = None
# _timer_start_time = {}
# _elapsed_last_time = {}
# _lock_file = ''
# _run_lock_async = False
#
# ##### SETUP METHOD
#
# def setup(annotate_line_colour, annotate_grid_colour, lock_file):
def setup(annotate_line_colour, annotate_grid_colour):
"""
:param annotate_line_colour:
:param annotate_grid_colour:
"""
global _is_setup, _annotate_line_colour, _annotate_grid_colour #, _lock_file
_annotate_line_colour = annotate_line_colour
_annotate_grid_colour = annotate_grid_colour
# _lock_file = os.path.join(os.path.dirname(__file__), lock_file)
_is_setup = True
# #
# # ##### SAFETY LOCK FILE HANDLING
# #
# def update_safe_lock():
# """
# TODO: Documentation
# :return:
# """
# with open(_lock_file, 'w') as f:
# print('%s' % timestamp(), file=f)
#
#
# def start_safe_lock_async(interval_secs):
# """
#
# :return:
# """
# global _run_lock_async
# _run_lock_async = True
# update_safe_lock_thread = Thread(target=_update_safe_lock_async, args=(interval_secs,))
# update_safe_lock_thread.setDaemon(True)
# update_safe_lock_thread.start()
#
#
# def stop_safe_lock_async():
# global _run_lock_async
# _run_lock_async = False
#
#
# def _update_safe_lock_async(interval_secs):
# first_run = True
# while _run_lock_async:
# if first_run or secs_elapsed_since_last(secs=interval_secs, timer_id='lock_async'):
# with open(_lock_file, 'w') as f:
# print('%s' % timestamp(), file=f)
# first_run = False
# else:
# sleep(1)
#
#
# def check_safe_lock(max_secs_for_safe_lock):
# """
# TODO: Documentation
# :return:
# """
# if len(sys.argv) >= 2 and sys.argv[1] == 'safe_start':
# try:
# with open(_lock_file, 'r') as f:
# time_since_updated_lock = timestamp_age(f.read(19))
# print('>>>%s<<<' % time_since_updated_lock)
# if time_since_updated_lock < max_secs_for_safe_lock:
# return True
# except OSError:
# print('Lock-OSError')
# pass
# except ValueError:
# print('Lock-ValueError')
# pass
# return False
#
#
# def remove_safe_lock():
# """
# TODO: Documentation
# :return:
# """
# stop_safe_lock_async()
# try:
# os.remove(_lock_file)
# except OSError:
# pass
#
# #
# # ##### TIME-RELATED METHODS
# #
# def timestamp(ms=False):
# """ Simply returns a formatted timestamp for the current system time """
# if ms:
# return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
# else:
# return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#
#
# def datestamp(days_offset=0):
# """ Simply returns a formatted date for the current system time """
# return (datetime.now() + timedelta(days=days_offset)).strftime('%Y-%m-%d')
#
#
# def secs_to_hhmmss(secs, integer_secs=True):
# if integer_secs:
# secs = int(secs)
# return str(timedelta(seconds=secs))
#
#
# def timestamp_age(prev_timestamp):
# if len(prev_timestamp) == 19:
# return (datetime.now() - datetime.strptime(prev_timestamp, '%Y-%m-%d %H:%M:%S')).total_seconds()
# else:
# raise ValueError
#
#
# def start_timer(timer_id='std'):
# """ Starts a timer running, for performance testing.
# Saves the current time in a module-level dictionary, using the timer_id. Raise exception if the specified
# timer_id already exists.
# :param timer_id: An optional string to name this timer, for use when multiple timers are in use at once.
# """
# global _timer_start_time
# if timer_id in _timer_start_time:
# raise Exception('Duplicate timer_id requested - not allowed.')
#
# _timer_start_time[timer_id] = datetime.now()
#
#
# def read_timer(timer_id='std'):
# global _timer_start_time
# if timer_id not in _timer_start_time:
# raise Exception('helper.read_timer must only be called after helper.start_timer, with a matching timer_id')
# delta_time = datetime.now() - _timer_start_time[timer_id]
# return '%4.2fs' % (delta_time.total_seconds())
#
#
# def end_timer(timer_id='std'):
# """ Ends the current running timer, and returns the elapsed time (in seconds).
# Raises an exception if there isn't a matching timer_id already in the dictionary, meaning the timer hasn't been
# started. Once done this removes the start_time for this timer_id from the dictionary, so we can be sure we're
# using the correct timings.
# :param timer_id: An optional string to name this timer - must match the timer_id specified in start_timer()
# """
# global _timer_start_time
# if timer_id not in _timer_start_time:
# raise Exception('helper.end_timer must only be called after helper.start_timer, with a matching timer_id')
#
# delta_time = datetime.now() - _timer_start_time[timer_id]
# del _timer_start_time[timer_id]
# return '%4.2fs' % (delta_time.total_seconds())
#
#
# def clear_timer(timer_id='std'):
# """ Clears the current running timer - used if we break out of the timed activity early.
# Raises an exception if there isn't a matching timer_id already in the dictionary, meaning the timer hasn't been
# started. Once done this removes the start_time for this timer_id from the dictionary, so we can be sure we're
# using the correct timings.
# :param timer_id: An optional string to name this timer - must match the timer_id specified in start_timer()
# """
# global _timer_start_time
# if timer_id not in _timer_start_time:
# raise Exception('helper.end_timer must only be called after helper.start_timer, with a matching timer_id')
#
# del _timer_start_time[timer_id]
#
#
# def secs_elapsed_since_last(secs, timer_id='std'):
# global _elapsed_last_time
# if timer_id in _elapsed_last_time:
# delta_time = datetime.now() - _elapsed_last_time[timer_id]
# delta_seconds = delta_time.total_seconds()
# # Once the time difference exceeds the specified number of seconds, update the "last run" time and return True.
# if delta_seconds >= secs:
# _elapsed_last_time[timer_id] = datetime.now()
# return True
# else:
# # If the timer hasn't yet been initialised, set it to the current time
# _elapsed_last_time[timer_id] = datetime.now()
# return False
#
#
# def clear_elapsed_timer(timer_id='std'):
# """ Clears the current running timer - used if we break out of the timed activity early.
# Raises an exception if there isn't a matching timer_id already in the dictionary, meaning the timer hasn't been
# started. Once done this removes the start_time for this timer_id from the dictionary, so we can be sure we're
# using the correct timings.
# :param timer_id: An optional string to name this timer - must match the timer_id specified in start_timer()
# """
# global _elapsed_last_time
# if timer_id in _elapsed_last_time:
# del _elapsed_last_time[timer_id]
#
#
# def hours_elapsed_since_last(hours, timer_id='std'):
# """ Tests whether the specified number of hours have passed since this last returned True.
# This is used to run actions on a regular schedule, i.e. every x hours. On first-run, it will set the "last run"
# time to the current time, but return false. The "last run" time is then only reset to current when the
# specified number of hours have elapsed, and the function returns true.
# :param hours: The number of hours to wait between returning True from this function.
# :param timer_id: An optional string to name this timer - allows multiple different delays simultaneously.
# :return: Returns True once every x hours - otherwise returns False.
#
# """
# return secs_elapsed_since_last(hours*3600, timer_id)
#
# ##### IMAGE ANNOTATION METHODS
#
def annotate_grid(annotate_img, spacing):
""" Takes any image and plots grid-lines at specified spacing as an overlay. Updated in-place - no return value
:param annotate_img: An image (any size) - should be BGR colour space, otherwise overlay will be grey too
:param spacing: Pixel spacing between subsequent grid lines (both horizontal and vertical)
"""
if not _is_setup:
raise Exception('Must call helper.setup() before using helper functions')
max_y = annotate_img.shape[0]
max_x = annotate_img.shape[1]
for x in range(spacing-1, max_x, spacing):
cv2.line(annotate_img, (x, 0), (x, max_y), _annotate_grid_colour, 1)
for y in range(spacing-1, max_y, spacing):
cv2.line(annotate_img, (0, y), (max_x, y), _annotate_grid_colour, 1)
def annotate_contour(annotate_img, contour_points=None, contour_points_dict=None, contour=None, contours=None):
""" Takes any image and plots a contour as an overlay. Updated in-place - no return value.
The contour can be supplied as a simple python list of points in the format [[x,y],[x,y],[x,y],...]
(contour_points=), as a numpy / OpenCV-compatible contour (contour=), or as a list of numpy / OpenCV-
compatible contours (contours=). If multiple are provided, only the first is used.
:param annotate_img: An image (any size) - should be BGR colour space, otherwise overlay will be grey too
:param contour_points: A simple python list, in the format [[x,y],[x,y],[x,y],...]
:param contour_points_dict: An iterable of dicts whose 'value' entries are point lists in the same format
:param contour: A numpy / OpenCV compatible contour
:param contours: A list of numpy / OpenCV compatible contours
"""
if not _is_setup:
raise Exception('Must call helper.setup() before using helper functions')
# Make individual contours into a single-element array, and convert to numpy format if needed
if contour_points is not None:
contours = [numpy.array(contour_points, dtype=numpy.int32)]
elif contour_points_dict is not None:
contours = []
for contour_points in contour_points_dict:
contours.append(numpy.array(contour_points['value'], dtype=numpy.int32))
elif contour is not None:
contours = [contour]
elif isinstance(contours, list):
pass
else:
raise Exception('Either contour=, contour_points= or contours= must be set for this function.')
cv2.drawContours(annotate_img, contours, -1, _annotate_line_colour, 1)
# def sleep(secs):
# time.sleep(secs)
def hostname():
return socket.gethostname().split('.')[0]
# def size_mb(test_object):
# size_bytes = asizeof.asizeof(test_object)
# return size_bytes / (1024 * 1024)
#
#
# def memory_usage():
# process = psutil.Process(os.getpid())
# mem = process.memory_full_info().uss / float(2 ** 20)
# return mem # In MB
#
#
# def memory_free():
# mem = virtual_memory()
# return mem.available / (1024 * 1024) # In MB
#
#
# def clear_memory():
# gc.collect()
#
#
# def enough_disk_space_for(mb_required, folder=os.path.dirname(__file__)):
# disk_free_space = shutil.disk_usage(folder).free
# if disk_free_space > mb_required * 1024 * 1024:
# return True
# else:
# return False
def get_temp_str():
try:
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as f:
return '%dC' % (int(f.read(5)) / 1000)
except (OSError, ValueError):
return 'N/A'
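# Minimal usage sketch (assumes BGR colour tuples for the overlay colours; only exercises
# the public helpers defined above on a blank canvas):
if __name__ == '__main__':
    setup(annotate_line_colour=(0, 0, 255), annotate_grid_colour=(0, 255, 0))
    demo_img = numpy.zeros((120, 160, 3), dtype=numpy.uint8)  # blank BGR canvas
    annotate_grid(demo_img, spacing=40)  # grid lines every 40 px
    annotate_contour(demo_img, contour_points=[[10, 10], [150, 10], [80, 110]])
    print(hostname(), get_temp_str())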
|
tasks.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple, deque
import errno
import functools
import importlib
import json
import logging
import os
from io import StringIO
from contextlib import redirect_stdout
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
import ansible_runner.cleanup
# dateutil
from dateutil.parser import parse as parse_date
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS, JOB_FOLDER_PREFIX
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
create_partition,
)
from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.utils.receptor import get_receptor_ctl, worker_info, get_conn_type, get_tls_client, worker_cleanup, administrative_workunit_reaper
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
from awx.main.signals import disable_activity_stream
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
total_instances = len(all_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
actual_groups.append(group_actual)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.node_type == exclude_type:
continue # never place execution instances in controlplane group or control instances in other groups
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
candidate_pool_ct = len([i for i in actual_instances if i.obj.node_type != exclude_type])
if not candidate_pool_ct:
continue
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.node_type == exclude_type:
continue
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / candidate_pool_ct >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to groups
with transaction.atomic():
with disable_activity_stream():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
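# Expand only the original entries: dependent settings are appended to setting_keys as we
# go, so their cache entries get purged too without being re-expanded themselves.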
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
def _cleanup_images_and_files(**kwargs):
if settings.IS_K8S:
return
this_inst = Instance.objects.me()
runner_cleanup_kwargs = this_inst.get_cleanup_task_kwargs(**kwargs)
if runner_cleanup_kwargs:
stdout = ''
with StringIO() as buffer:
with redirect_stdout(buffer):
ansible_runner.cleanup.run_cleanup(runner_cleanup_kwargs)
stdout = buffer.getvalue()
if '(changed: True)' in stdout:
logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')
# only one instance (the one selected by the '-hostname' ordering below) runs cleanup on execution nodes
checker_instance = Instance.objects.filter(node_type__in=['hybrid', 'control'], enabled=True, capacity__gt=0).order_by('-hostname').first()
if checker_instance and this_inst.hostname == checker_instance.hostname:
for inst in Instance.objects.filter(node_type='execution', enabled=True, capacity__gt=0):
runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs)
if not runner_cleanup_kwargs:
continue
try:
stdout = worker_cleanup(inst.hostname, runner_cleanup_kwargs)
if '(changed: True)' in stdout:
logger.info(f'Performed cleanup on execution node {inst.hostname} with output:\n{stdout}')
except RuntimeError:
logger.exception(f'Error running cleanup on execution node {inst.hostname}')
@task(queue='tower_broadcast_all')
def handle_removed_image(remove_images=None):
"""Special broadcast invocation of this method to handle case of deleted EE"""
_cleanup_images_and_files(remove_images=remove_images, file_pattern='')
@task(queue=get_local_queuename)
def cleanup_images_and_files():
_cleanup_images_and_files()
@task(queue=get_local_queuename)
def cluster_node_health_check(node):
"""
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
"""
if node == '':
logger.warn('Local health check incorrectly called with blank string')
return
elif node != settings.CLUSTER_HOST_ID:
logger.warn(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
return
try:
this_inst = Instance.objects.me()
except Instance.DoesNotExist:
logger.warn(f'Instance record for {node} missing, could not check capacity.')
return
this_inst.local_health_check()
@task(queue=get_local_queuename)
def execution_node_health_check(node):
if node == '':
logger.warn('Remote health check incorrectly called with blank string')
return
try:
instance = Instance.objects.get(hostname=node)
except Instance.DoesNotExist:
logger.warn(f'Instance record for {node} missing, could not check capacity.')
return
if instance.node_type != 'execution':
raise RuntimeError(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')
data = worker_info(node)
prior_capacity = instance.capacity
instance.save_health_data(
version='ansible-runner-' + data.get('runner_version', '???'),
cpu=data.get('cpu_count', 0),
memory=data.get('mem_in_bytes', 0),
uuid=data.get('uuid'),
errors='\n'.join(data.get('errors', [])),
)
if data['errors']:
formatted_error = "\n".join(data["errors"])
if prior_capacity:
logger.warn(f'Health check marking execution node {node} as lost, errors:\n{formatted_error}')
else:
logger.info(f'Failed to find capacity of new or lost execution node {node}, errors:\n{formatted_error}')
else:
logger.info('Set capacity of execution node {} to {}, worker info data:\n{}'.format(node, instance.capacity, json.dumps(data, indent=2)))
return data
def inspect_execution_nodes(instance_list):
with advisory_lock('inspect_execution_nodes_lock', wait=False):
node_lookup = {}
for inst in instance_list:
if inst.node_type == 'execution':
node_lookup[inst.hostname] = inst
ctl = get_receptor_ctl()
connections = ctl.simple_command('status')['Advertisements']
nowtime = now()
for ad in connections:
hostname = ad['NodeID']
commands = ad.get('WorkCommands') or []
worktypes = []
for c in commands:
worktypes.append(c["WorkType"])
if 'ansible-runner' not in worktypes:
continue
changed = False
if hostname in node_lookup:
instance = node_lookup[hostname]
elif settings.MESH_AUTODISCOVERY_ENABLED:
defaults = dict(enabled=False)
(changed, instance) = Instance.objects.register(hostname=hostname, node_type='execution', defaults=defaults)
logger.warn(f"Registered execution node '{hostname}' (marked disabled by default)")
else:
logger.warn(f"Unrecognized node on mesh advertising ansible-runner work type: {hostname}")
continue
was_lost = instance.is_lost(ref_time=nowtime)
last_seen = parse_date(ad['Time'])
if instance.last_seen and instance.last_seen >= last_seen:
continue
instance.last_seen = last_seen
instance.save(update_fields=['last_seen'])
if changed:
execution_node_health_check.apply_async([hostname])
elif was_lost:
# if the instance *was* lost, but has appeared again,
# attempt to re-establish the initial capacity and version
# check
logger.warn(f'Execution node attempting to rejoin as instance {hostname}.')
execution_node_health_check.apply_async([hostname])
elif instance.capacity == 0 and instance.enabled:
# nodes with a proven connection that still need remediation have their health checks run at a reduced frequency
if not instance.last_health_check or (nowtime - instance.last_health_check).total_seconds() >= settings.EXECUTION_NODE_REMEDIATION_CHECKS:
# Periodically re-run the health check of errored nodes, in case someone fixed it
# TODO: perhaps decrease the frequency of these checks
logger.debug(f'Restarting health check for execution node {hostname} with known errors.')
execution_node_health_check.apply_async([hostname])
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all())
this_inst = None
lost_instances = []
for inst in instance_list:
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
break
else:
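# for/else: reached only when no instance matched CLUSTER_HOST_ID above, in which
# case this node registers (or re-registers) itself.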
(changed, this_inst) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower control node '{}'".format(this_inst.hostname))
inspect_execution_nodes(instance_list)
for inst in list(instance_list):
if inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.local_health_check()
if startup_event and this_inst.capacity != 0:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shutdown services
for other_inst in instance_list:
if other_inst.version == "" or other_inst.version.startswith('ansible-runner') or other_inst.node_type == 'execution':
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive'))
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
"""
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
in a specific receptor directory. This directory on disk is a random 8 character string, e.g. qLL2JFNT
This is also called the work Unit ID in receptor, and is used in various receptor commands,
e.g. "work results qLL2JFNT"
After an AWX job executes, the receptor work unit directory is cleaned up by
issuing the work release command. In some cases the release process might fail, or
if AWX crashes during a job's execution, the work release command is never issued to begin with.
As such, this periodic task will obtain a list of all receptor work units, and find which ones
belong to AWX jobs that are in a completed state (status is canceled, error, or succeeded).
This task will call "work release" on each of these work units to clean up the files on disk.
Note that when we call "work release" on a work unit that actually represents remote work
both the local and remote work units are cleaned up.
Since we are cleaning up jobs that controller considers to be inactive, we take the added
precaution of calling "work cancel" in case the work unit is still active.
"""
if not settings.RECEPTOR_RELEASE_WORK:
return
logger.debug("Checking for unreleased receptor work units")
receptor_ctl = get_receptor_ctl()
receptor_work_list = receptor_ctl.simple_command("work list")
unit_ids = [id for id in receptor_work_list]
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
for job in jobs_with_unreleased_receptor_units:
logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
receptor_ctl.simple_command(f"work cancel {job.work_unit_id}")
receptor_ctl.simple_command(f"work release {job.work_unit_id}")
administrative_workunit_reaper(receptor_work_list)
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it \
was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
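    # Poll briefly for the UnifiedJob to reach a finished state before sending
    # notification templates; events can land slightly before the status flips
    # from 'running' to its final value.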
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):
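    # Compare the cached SmartInventoryMembership rows with the hosts currently
    # matched by the smart inventory's host_filter and persist only the delta.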
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
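# Decorator used by BaseTask.run() below: whatever happens inside the wrapped
# call, remove any temporary directories/files registered in self.cleanup_paths.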
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
self.job_created = None
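        # Sliding window of send timestamps used by event_handler() to throttle
        # websocket emission to roughly MAX_WEBSOCKET_EVENT_RATE events per second.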
self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"process_isolation_executable": "podman", # need to provide, runner enforces default via argparse
"container_options": ['--user=root'],
}
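        # If the execution environment image lives in an authenticated registry,
        # pass its registry credential on to ansible-runner as container_auth_data.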
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if all([cred.has_input(field_name) for field_name in ('host', 'username', 'password')]):
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
verify_ssl = cred.get_input('verify_ssl')
params['container_auth_data'] = {'host': host, 'username': username, 'password': password, 'verify_ssl': verify_ssl}
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
                # Using lowercase z allows the dir to be mounted by multiple containers
# Uppercase Z restricts access (in weird ways) to 1 container at a time
params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
        Return structure is a dict of the form:
            {'credentials': {<awx.main.models.Credential>: <decrypted private key data>, ...}}
        Subclasses override this; the base implementation returns None.
        """
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
path = tempfile.mkdtemp(prefix=JOB_FOLDER_PREFIX % instance.pk, dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(path)
# Ansible runner requires that project exists,
# and we will write files in the other folders without pre-creating the folder
for subfolder in ('project', 'inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
                with open(path, 'w') as f:
                    f.write(data)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError('The project could not sync because there is no Execution Environment.')
return env
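    # The inventory handed to ansible-runner is a small executable script that
    # prints the JSON produced by Inventory.get_script_data(); the host_name ->
    # host_id mapping is kept on self.host_map so events can be tied to Host objects.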
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
event_data['job_created'] = self.job_created
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
# To prevent overwhelming the broadcast queue, skip some websocket messages
if self.recent_event_timings:
cpu_time = time.time()
first_window_time = self.recent_event_timings[0]
last_window_time = self.recent_event_timings[-1]
if event_data.get('event') in MINIMAL_EVENTS:
should_emit = True # always send some types like playbook_on_stats
elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
should_emit = False # exclude events with no output
else:
should_emit = any(
[
                        # if the oldest websocket message in the window was sent over 1 second ago
cpu_time - first_window_time > 1.0,
# if the very last websocket message came in over 1/30 seconds ago
self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
# if the queue is not yet full
len(self.recent_event_timings) != self.recent_event_timings.maxlen,
]
)
if should_emit:
self.recent_event_timings.append(cpu_time)
else:
event_data.setdefault('event_data', {})
event_data['skip_websocket_message'] = True
elif self.recent_event_timings.maxlen:
self.recent_event_timings.append(time.time())
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
                logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
from awx.main.signals import disable_activity_stream # Circular import
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
from awx.main.signals import disable_activity_stream # Circular import
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# self.instance because of the update_model pattern and when it's used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
self.job_created = str(self.instance.created)
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
},
}
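            # The params above map onto ansible_runner.interface.run() keyword arguments;
            # SystemJobs run them in-process below, while other jobs hand them to
            # AWXReceptorJob (defined elsewhere in this module) for execution via Receptor.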
idle_timeout = getattr(settings, 'DEFAULT_JOB_IDLE_TIMEOUT', 0)
if idle_timeout > 0:
params['settings']['idle_timeout'] = idle_timeout
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
res = ansible_runner.interface.run(
project_dir=settings.BASE_DIR,
event_handler=self.event_handler,
finished_callback=self.finished_callback,
status_handler=self.status_handler,
cancel_callback=self.cancel_callback,
**params,
)
else:
receptor_job = AWXReceptorJob(self, params)
res = receptor_job.run()
self.unit_id = receptor_job.unit_id
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notification sends even if playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except ReceptorNodeNotFound as exc:
extra_update_fields['job_explanation'] = str(exc)
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
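        # Search-path precedence for collections/roles: an existing env var wins,
        # then the project's ansible.cfg setting, then the defaults above; the job's
        # downloaded requirements folder under CONTAINER_ROOT is always prepended.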
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
            logger.info('Mounting insights system id directory {} into the job execution environment'.format(insights_dir))
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.execution_environment is None:
error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
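        # sync_needs collects job_tags for an inline project update: the
        # 'update_<scm_type>' tag refreshes the local source tree, while
        # 'install_roles' / 'install_collections' repopulate the Galaxy requirements cache.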
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
if sync_needs:
pu_ig = job.instance_group
pu_en = Instance.objects.me().hostname
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
controller_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
                sync_metafields['scm_clean'] = True  # to accommodate force pushes
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
local_project_sync.log_lifecycle("controller_node_chosen")
local_project_sync.log_lifecycle("execution_node_chosen")
create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
# Case where a local sync is not needed, meaning that local tree is
# up-to-date with project, job is running project current version
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not
# ran inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
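        # playbook_new_revision is populated from the project_update.yml playbook's
        # set_fact of 'scm_version' (see event_handler below) and written back to
        # the ProjectUpdate in post_run_hook.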
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
            env['ANSIBLE_GALAXY_IGNORE'] = str(True)
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
            logger.warning(f'Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
        return 'project_update.yml'
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
continue
if settings.IS_K8S:
instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
else:
instance_group = project_update.instance_group
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=instance_group,
execution_node=project_update.execution_node,
controller_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
local_inv_update.log_lifecycle("controller_node_chosen")
local_inv_update.log_lifecycle("execution_node_chosen")
try:
create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
'''
Note: We don't support blocking=False
'''
def acquire_lock(self, instance, blocking=True):
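        # Serializes project updates that share a local source tree: an fcntl lock
        # on the project's lock file is acquired in pre_run_hook() and released via
        # release_lock() above once the update finishes.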
lock_path = instance.get_lock_file()
if lock_path is None:
# If from migration or someone blanked local_path for any other reason, recoverable by save
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
# the project update playbook is not in a git repo, but uses a vendoring directory
# to be consistent with the ansible-runner model,
# that is moved into the runner project folder here
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
# submodules copied in loop because shallow copies from local HEADs are ideal
# and no git clone submodule options are compatible with minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
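    # Illustrative sketch (not part of the original module): the shallow local clone
    # technique used by make_local_copy(), with hypothetical paths. A temporary branch
    # pins the exact revision, and the file:// URI lets git honor the depth option.
    #
    #     import git
    #     from pathlib import Path
    #     src = git.Repo('/var/lib/projects/_42__demo')
    #     branch = src.create_head('awx_internal/tmp', src.head.commit.hexsha)
    #     git.Repo.clone_from(Path(src.working_dir).as_uri(), '/tmp/job/project',
    #                         branch=branch, depth=1, single_branch=True)
    #     src.delete_head(branch, force=True)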
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Roles and collection folders copy to durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
if 'ANSIBLE_COLLECTIONS_PATHS' in env:
paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
else:
paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
paths.append('/usr/share/automation-controller/collections')
env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
return env
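    # Illustrative result (not part of the original module): for SCM inventory sources
    # the logic above puts the synced project's requirements_collections folder first,
    # then any pre-existing env/ansible.cfg entries, then the defaults, ending with the
    # automation-controller path. Assuming CONTAINER_ROOT is /runner and a hypothetical
    # pre-existing entry /etc/custom/collections, the final value looks like:
    #
    #     ANSIBLE_COLLECTIONS_PATHS=/runner/requirements_collections:/etc/custom/collections:~/.ansible/collections:/usr/share/ansible/collections:/usr/share/automation-controller/collections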
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path)
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = os.path.join('inventory', injector.filename)
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=Instance.objects.me().hostname,
controller_node=Instance.objects.me().hostname,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
local_project_sync.log_lifecycle("controller_node_chosen")
local_project_sync.log_lifecycle("execution_node_chosen")
create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
            raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
        # TODO: this is able to run by turning off isolation
        # the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
def run(self):
self.exc = None
try:
super().run()
except Exception:
self.exc = sys.exc_info()
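# Illustrative sketch (not part of the original module): one way to use the
# TransmitterThread helper above: run a callable in a thread, then surface any
# exception back in the caller after join().
#
#     t = TransmitterThread(target=lambda: 1 / 0)
#     t.start()
#     t.join()
#     if t.exc:
#         raise t.exc[1].with_traceback(t.exc[2])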
class AWXReceptorJob:
def __init__(self, task, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params.update(execution_environment_params)
if not settings.IS_K8S and self.work_type == 'local' and 'only_transmit_kwargs' not in self.runner_params:
self.runner_params['only_transmit_kwargs'] = True
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = get_receptor_ctl()
res = None
try:
res = self._run_internal(receptor_ctl)
return res
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
try:
receptor_ctl.simple_command(f"work release {self.unit_id}")
except Exception:
logger.exception(f"Error releasing work unit {self.unit_id}.")
@property
def sign_work(self):
return False if settings.IS_K8S else True
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
transmitter_thread.start()
# submit our work, passing
# in the right side of our socketpair for reading.
_kw = {}
if self.work_type == 'ansible-runner':
_kw['node'] = self.task.instance.execution_node
use_stream_tls = get_conn_type(_kw['node'], receptor_ctl).name == "STREAMTLS"
_kw['tlsclient'] = get_tls_client(use_stream_tls)
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params, signwork=self.sign_work, **_kw)
self.unit_id = result['unitid']
# Update the job with the work unit in-memory so that the log_lifecycle
# will print out the work unit that is to be associated with the job in the database
# via the update_model() call.
# We want to log the work_unit_id as early as possible. A failure can happen in between
# when we start the job in receptor and when we associate the job <-> work_unit_id.
# In that case, there will be work running in receptor and Controller will not know
# which Job it is associated with.
        # We do not programmatically handle this case. Ideally, we would handle this with a reaper case.
        # The two distinct job lifecycle log events below allow us to at least detect when this
        # edge case occurs. If the lifecycle event work_unit_id_received occurs without the
        # work_unit_id_assigned event, then this case may have occurred.
self.task.instance.work_unit_id = result['unitid'] # Set work_unit_id in-memory only
self.task.instance.log_lifecycle("work_unit_id_received")
self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])
self.task.instance.log_lifecycle("work_unit_id_assigned")
sockin.close()
sockout.close()
if transmitter_thread.exc:
raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
transmitter_thread.join()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
        # which exits if the job has finished normally. The context manager ensures we do not
        # leave any threads lying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
try:
unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
detail = unit_status.get('Detail', None)
state_name = unit_status.get('StateName', None)
except Exception:
detail = ''
state_name = ''
logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')
if 'exceeded quota' in detail:
logger.warn(detail)
log_name = self.task.instance.log_format
logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
                # If ansible-runner ran but an error occurred at runtime, the traceback information
                # is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
if not self.task.instance.result_traceback:
try:
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
lines = resultsock.readlines()
receptor_output = b"".join(lines).decode()
if receptor_output:
self.task.instance.result_traceback = receptor_output
self.task.instance.save(update_fields=['result_traceback'])
elif detail:
self.task.instance.result_traceback = detail
self.task.instance.save(update_fields=['result_traceback'])
else:
logger.warn(f'No result details or output from {self.task.instance.log_format}, status:\n{unit_status}')
except Exception:
raise RuntimeError(detail)
return res
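    # Illustrative sketch (not part of the original module): the "first of two futures
    # wins" pattern used in _run_internal(), with stand-in work functions. Note that the
    # executor still waits for the slower future when the with-block exits.
    #
    #     import concurrent.futures
    #     import time
    #
    #     def work():
    #         time.sleep(2)
    #         return 'finished'
    #
    #     def watcher():
    #         time.sleep(5)
    #         return 'gave up waiting'
    #
    #     with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
    #         futures = [pool.submit(work), pool.submit(watcher)]
    #         done = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
    #         print(list(done.done)[0].result())  # -> 'finished'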
    # Spawned in a thread so Receptor can start reading before we finish writing; we
    # write our payload to the left side of our socketpair.
@cleanup_new_process
def transmit(self, _socket):
try:
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
finally:
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
@cleanup_new_process
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
"pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
if self.work_type == 'ansible-runner' and settings.AWX_CLEANUP_PATHS:
# on execution nodes, we rely on the private data dir being deleted
cli_params = f"--private-data-dir={private_data_dir} --delete"
else:
# on hybrid nodes, we rely on the private data dir NOT being deleted
cli_params = f"--private-data-dir={private_data_dir}"
receptor_params = {"params": cli_params}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
return 'kubernetes-runtime-auth'
return 'kubernetes-incluster-auth'
if self.task.instance.execution_node == settings.CLUSTER_HOST_ID or self.task.instance.execution_node == self.task.instance.controller_node:
return 'local'
return 'ansible-runner'
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
time.sleep(1)
@property
def pod_definition(self):
ee = self.task.instance.execution_environment
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
if self.task.instance.execution_environment.pull:
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
if self.task and self.task.instance.is_container_group_task:
# If EE credential is passed, create an imagePullSecret
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
# Create pull secret in k8s cluster based on ee cred
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(self.task.instance)
secret_name = pm.create_secret(job=self.task.instance)
# Inject secret name into podspec
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
|
worker.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 22:18:02 2020
@author: Soundarya Ganesh
"""
import sys
import random
import threading
import json
from socket import *
import time
import numpy as np
from time import *
from datetime import *
import os
w_id = sys.argv[2]
class Task:
def __init__(self, job_id, task_id, remaining_time):
self.job_id = job_id
self.task_id = task_id
self.remaining_time = remaining_time
def reduce(self):
self.remaining_time -= 1
def time_check(self):
remaining_time = self.remaining_time
if (remaining_time == 0):
return 1
else:
return 0
def func_connect_est(path2file, flag, host, port):  # 0 = incoming, 1 = outgoing
    with open(path2file, "a+") as fp:
        if flag == 0:
            print_lock.acquire()
            fp.write(str(datetime.now()) + "\tIN: Connection Established [host is {0}, port used is {1}]\n".format(host, port))
            print_lock.release()
        elif flag == 1:
            print_lock.acquire()
            fp.write(str(datetime.now()) + "\tOUT: Connection Established [host is {0}, port used is {1}]\n".format(host, port))
            print_lock.release()
def func_log_worker_start(path2file, i, port, slot):
    with open(path2file, "a+") as f:
        print_lock.acquire()
        f.write(str(datetime.now()) + ":\tWorker has started = [worker_id:{0}, port:{1}, slots:{2}]\n".format(i, port, slot))
        print_lock.release()
def func_log_update_exec(path2file, flag, job_id, task_id):
    with open(path2file, "a+") as f:
        if flag == 0:
            print_lock.acquire()
            f.write(str(datetime.now()) + ":\tUpdate sent to master = [job_id:{0}, task_id:{1}] completed\n".format(job_id, task_id))
            print_lock.release()
        elif flag == 1:
            print_lock.acquire()
            f.write(str(datetime.now()) + ":\tFinished executing task = [job_id:{0}, task_id:{1}]\n".format(job_id, task_id))
            print_lock.release()
        elif flag == 2:
            print_lock.acquire()
            f.write(str(datetime.now()) + ":\tStarted executing task = [job_id:{0}, task_id:{1}]\n".format(job_id, task_id))
            print_lock.release()
def func_log_receive(path2file, job_id, task_id, duration):  # check
    with open(path2file, "a+") as f:
        print_lock.acquire()
        f.write(str(datetime.now()) + ":\tReceived task = [job_id:{0}, task_id:{1}, duration:{2}]\n".format(job_id, task_id, duration))
        print_lock.release()
def start_execute_task(w_id, task_data):
global job_id, task_id, remaining_time
task = Task(task_data["job_id"],
task_data["task_id"], task_data["duration"])
job_id = task_data["job_id"]
task_id = task_data["task_id"]
remaining_time = task_data["duration"]
n=len(execn_pool)
path2file= "proj_log/worker_" + str(w_id) + ".txt"
func_log_update_exec(path2file,2, task_data["job_id"], task_data["task_id"])
for i in range(n):
if (isinstance(execn_pool[w_id-1][i], int) and (execn_pool[w_id-1][i] == 0 and num_free_slots[w_id-1] > 0)):
execn_pool[w_id-1][i] = task
num_free_slots[w_id-1] -= 1
break
def func_receive_task_start(w_id):
skt = socket(AF_INET, SOCK_STREAM)
with skt:
skt.bind(("localhost", ports[w_id-1]))
skt.listen(1024)
while (1):
connectn, addr = skt.accept()
path2file= "proj_log/worker_"+str(w_id)+".txt"
func_connect_est(path2file, 0, addr[0], addr[1])
with connectn:
task_start_data = connectn.recv(1024).decode()
if task_start_data:
task = json.loads(task_start_data)
path2file= "proj_log/worker_"+str(w_id)+".txt"
func_log_receive(path2file, task["job_id"], task["task_id"], task["duration"])
workerLock.acquire()
start_execute_task(w_id, task)
workerLock.release()
def worker_task_execute(w_id):
while (1):
if(slots[w_id-1] == 0):
continue
for i in range(slots[w_id-1]):
if (isinstance(execn_pool[w_id-1][i], int) and execn_pool[w_id-1][i] == 0):
continue
elif (execn_pool[w_id-1][i].time_check()):
task = execn_pool[w_id-1][i]
job_id = task.job_id
task_id = task.task_id
store_t_data = {
"worker_id": w_id,
"job_id": job_id,
"task_id": task_id
}
path2file= "proj_log/worker_"+str(w_id)+".txt"
func_log_update_exec(path2file,1, job_id, task_id)
workerLock.acquire()
#remove_task(w_id, i)
execn_pool[w_id-1][i] = 0
num_free_slots[w_id-1] += 1
workerLock.release()
t_data = json.dumps(store_t_data)
skt = socket(AF_INET, SOCK_STREAM)
with skt:
skt.connect(('localhost', 5001))
path2file="proj_log/worker_"+str(w_id)+".txt"
func_connect_est(path2file, 1, "localhost", "5001")
skt.send(t_data.encode())
path2file="proj_log/worker_"+str(w_id)+".txt"
func_log_update_exec(path2file,0, job_id, task_id)
else:
workerLock.acquire()
execn_pool[w_id-1][i].reduce()
workerLock.release()
sleep(1)
workerLock = threading.Lock()
print_lock = threading.Lock()
slots = list()
ports = list()
num_free_slots = list()
execn_pool = list()
with open("config.json") as f:
config = json.load(f)
for worker in config['workers']:
ports.append(worker['port'])
slots.append(worker['slots'])
num_free_slots.append(worker['slots'])
execn_pool.append([0 for i in range(worker['slots'])])
count = len(ports)
# make sure the log directory exists before the start-up log lines below are written
try:
    os.mkdir('proj_log')
except OSError:
    pass
for i in range(count):
    path2file = "proj_log/worker_" + str(w_id) + ".txt"
    func_log_worker_start(path2file, i + 1, ports[i], slots[i])
if __name__ == "__main__":
try:
os.mkdir('proj_log')
except:
pass
f = open("proj_log/worker_"+w_id+".txt", "w")
f.close()
if (len(sys.argv) != 3):
sys.exit()
worker_port = int(sys.argv[1])
worker_id = int(sys.argv[2])
ports[worker_id - 1] = worker_port
thread_receive_task_start = threading.Thread(
target=func_receive_task_start, args=(worker_id,))
thread_task_execute = threading.Thread(
target=worker_task_execute, args=(worker_id,))
thread_receive_task_start.start()
thread_task_execute.start()
thread_receive_task_start.join()
thread_task_execute.join()
|
proc.py
|
import os
import sys
import logging
import ctypes
import select
import socket
import fcntl
from time import time
from papa import utils, Error
from papa.utils import extract_name_value_pairs, wildcard_iter, cast_bytes, \
send_with_retry
from papa.server.papa_socket import find_socket
from subprocess import Popen, PIPE, STDOUT
from threading import Thread, Lock
from collections import deque, namedtuple
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
try:
import resource
except ImportError:
resource = None
try:
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable
FileNotFoundError
except NameError as e:
# noinspection PyShadowingBuiltins
FileNotFoundError = OSError
__author__ = 'Scott Maxwell'
log = logging.getLogger('papa.server')
def convert_size_string_to_bytes(s):
try:
return int(s)
except ValueError:
return int(s[:-1]) * {'g': 1073741824, 'm': 1048576, 'k': 1024}[s[-1].lower()]
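# Illustrative usage (not part of the original module):
#
#     convert_size_string_to_bytes('1m')    # -> 1048576
#     convert_size_string_to_bytes('512k')  # -> 524288
#     convert_size_string_to_bytes('2048')  # -> 2048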
class OutputQueue(object):
Item = namedtuple('Item', 'type timestamp data')
STDOUT = 0
STDERR = 1
CLOSED = -1
def __init__(self, bufsize=1048576):
self.lock = Lock()
self.bufsize = bufsize
self.q = deque()
self._used = 0
self._closed = False
def add(self, output_type, data=None):
if not self._closed:
with self.lock:
if not self._closed:
data_tuple = OutputQueue.Item(output_type, time(), data)
if output_type != OutputQueue.CLOSED and data:
if len(data) >= self.bufsize:
self.q.clear()
self._used = len(data)
else:
self._used += len(data)
while self._used > self.bufsize:
first = self.q.popleft()
self._used -= len(first.data)
self.q.append(data_tuple)
def retrieve(self):
if self.q:
with self.lock:
if self.q:
l = list(self.q)
return l[-1].timestamp, l
return 0, None
def remove(self, timestamp):
with self.lock:
q = self.q
while q and q[0].timestamp <= timestamp:
item = q.popleft()
if self._used:
self._used -= len(item.data)
def close(self):
with self.lock:
self.bufsize = 0
self.q = deque()
self._used = 0
self._closed = True
def __len__(self):
return len(self.q)
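# Illustrative sketch (not part of the original module): how OutputQueue keeps itself
# under bufsize while a reader consumes with retrieve()/remove().
#
#     q = OutputQueue(bufsize=16)
#     q.add(OutputQueue.STDOUT, b'hello ')
#     q.add(OutputQueue.STDOUT, b'this one overflows')  # >= bufsize: queue is reset to just this item
#     timestamp, items = q.retrieve()                   # newest timestamp plus all buffered items
#     q.remove(timestamp)                               # drop everything the reader has seen
#     q.add(OutputQueue.CLOSED, 0)                      # exit code recorded when the process ends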
class Process(object):
"""Wraps a process.
Options:
- **name**: the process name. Multiple processes can share the same name.
- **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it is split using
:func:`shlex.split`. Defaults to None.
- **working_dir**: the working directory to run the command in. If
not provided, will default to the current working directory.
- **shell**: if *True*, will run the command in the shell
environment. *False* by default. **warning: this is a
security hazard**.
- **uid**: if given, is the user id or name the command should run
with. The current uid is the default.
- **gid**: if given, is the group id or name the command should run
with. The current gid is the default.
- **env**: a mapping containing the environment variables the command
will run with. Optional.
- **rlimits**: a mapping containing rlimit names and values that will
be set before the command runs.
"""
def __init__(self, name, args, env, rlimits, instance,
working_dir=None, shell=False, uid=None, gid=None,
stdout=1, stderr=1, bufsize='1m'):
self.instance = instance
instance_globals = instance['globals']
self._processes = instance_globals['processes']
self.name = name
self.args = args
self.env = env
self.rlimits = rlimits
self.working_dir = working_dir
self.shell = shell
self.bufsize = convert_size_string_to_bytes(bufsize)
self.pid = 0
self.running = False
self.started = 0
if self.bufsize:
self.out = int(stdout)
self.err = stderr if stderr == 'stdout' else int(stderr)
else:
self.out = self.err = 0
if uid:
if pwd:
try:
self.uid = int(uid)
self.username = pwd.getpwuid(self.uid).pw_name
except KeyError:
raise utils.Error('%r is not a valid user id' % uid)
except ValueError:
try:
self.username = uid
self.uid = pwd.getpwnam(uid).pw_uid
except KeyError:
raise utils.Error('%r is not a valid user name' % uid)
else:
raise utils.Error('uid is not supported on this platform')
else:
self.username = None
self.uid = None
if gid:
if grp:
try:
self.gid = int(gid)
grp.getgrgid(self.gid)
except (KeyError, OverflowError):
raise utils.Error('No such group: %r' % gid)
except ValueError:
try:
self.gid = grp.getgrnam(gid).gr_gid
except KeyError:
raise utils.Error('No such group: %r' % gid)
else:
raise utils.Error('gid is not supported on this platform')
elif self.uid:
self.gid = pwd.getpwuid(self.uid).pw_gid
else:
self.gid = None
# sockets created before fork, should be let go after.
self._worker = None
self._thread = None
self._output = None
self._auto_close = False
def __eq__(self, other):
return (
self.name == other.name and
self.args == other.args and
self.env == other.env and
self.rlimits == other.rlimits and
self.working_dir == other.working_dir and
self.shell == other.shell and
self.out == other.out and
self.err == other.err and
self.bufsize == other.bufsize and
self.uid == other.uid and
self.gid == other.gid
)
def spawn(self):
existing = self._processes.get(self.name)
if existing:
if self == existing:
self.pid = existing.pid
self.running = existing.running
self.started = existing.started
else:
raise utils.Error('Process for {0} has already been created - {1}'.format(self.name, str(existing)))
else:
managed_sockets = []
fixed_args = []
self.started = time()
for arg in self.args:
if '$(socket.' in arg:
start = arg.find('$(socket.') + 9
end = arg.find(')', start)
if end == -1:
raise utils.Error('Process for {0} argument starts with "$(socket." but has no closing parenthesis'.format(self.name))
socket_and_part = arg[start:end]
socket_name, part = socket_and_part.rpartition('.')[::2]
if not part or part not in ('port', 'fileno'):
raise utils.Error('You forgot to specify either ".port" or ".fileno" after the name')
try:
s = find_socket(socket_name, self.instance)
except Exception:
raise utils.Error('Socket {0} not found'.format(socket_name))
if part == 'port':
replacement = s.port
elif s.reuseport:
sock = s.clone_for_reuseport()
managed_sockets.append(sock)
replacement = sock.fileno()
else:
replacement = s.socket.fileno()
arg = '{0}{1}{2}'.format(arg[:start - 9], replacement, arg[end + 1:])
fixed_args.append(arg)
if not fixed_args:
raise utils.Error('No command')
def preexec():
streams = [sys.stdin]
if not self.out:
streams.append(sys.stdout)
if not self.err:
streams.append(sys.stderr)
for stream in streams:
if hasattr(stream, 'fileno'):
try:
stream.flush()
devnull = os.open(os.devnull, os.O_RDWR)
# noinspection PyTypeChecker
os.dup2(devnull, stream.fileno())
# noinspection PyTypeChecker
os.close(devnull)
except IOError:
# some streams, like stdin - might be already closed.
pass
# noinspection PyArgumentList
os.setsid()
if resource:
for limit, value in self.rlimits.items():
resource.setrlimit(limit, (value, value))
if self.gid:
try:
# noinspection PyTypeChecker
os.setgid(self.gid)
except OverflowError:
if not ctypes:
raise
# versions of python < 2.6.2 don't manage unsigned int for
# groups like on osx or fedora
os.setgid(-ctypes.c_int(-self.gid).value)
if self.username is not None:
try:
# noinspection PyTypeChecker
os.initgroups(self.username, self.gid)
except (OSError, AttributeError):
                            # not supported on Mac or Python 2.6
pass
if self.uid:
# noinspection PyTypeChecker
os.setuid(self.uid)
extra = {}
if self.out:
extra['stdout'] = PIPE
if self.err:
if self.err == 'stdout':
extra['stderr'] = STDOUT
else:
extra['stderr'] = PIPE
try:
self._worker = Popen(fixed_args, preexec_fn=preexec,
close_fds=False, shell=self.shell,
cwd=self.working_dir, env=self.env, bufsize=-1,
**extra)
except FileNotFoundError as file_not_found_exception:
if not os.path.exists(fixed_args[0]):
raise utils.Error('Bad command - {0}'.format(file_not_found_exception))
if self.working_dir and not os.path.isdir(self.working_dir):
raise utils.Error('Bad working_dir - {0}'.format(file_not_found_exception))
raise
# let go of sockets created only for self.worker to inherit
for sock in managed_sockets:
sock.close()
self._processes[self.name] = self
self.pid = self._worker.pid
self._output = OutputQueue(self.bufsize)
log.info('Created process %s', self)
self.running = True
self._thread = Thread(target=self._watch)
self._thread.daemon = True
self._thread.start()
return self
def _watch(self):
pipes = []
stdout = self._worker.stdout
stderr = self._worker.stderr
output = self._output
if self.out:
pipes.append(stdout)
if self.err and self.err != 'stdout':
pipes.append(stderr)
if pipes:
for pipe in pipes:
fd = pipe.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
data = True
while data and not self._auto_close:
out = select.select(pipes, [], [])[0]
for p in out:
data = p.read()
if data:
output.add(OutputQueue.STDOUT if p == stdout else OutputQueue.STDERR, data)
if stdout:
stdout.close()
if stderr:
stderr.close()
out = self._worker.wait()
self.running = False
if self._auto_close:
instance_globals = self.instance['globals']
with instance_globals['lock']:
log.info('Removed process %s', self)
instance_globals['processes'].pop(self.name, None)
else:
output.add(OutputQueue.CLOSED, out)
def __str__(self):
result = ['{0} pid={1} running={2} started={3}'.format(self.name, self.pid, self.running, self.started)]
if self.uid:
result.append('uid={0}'.format(self.uid))
if self.gid:
result.append('gid={0}'.format(self.gid))
if self.shell:
result.append('shell=True')
# if self.env:
# result.extend('env.{0}={1}'.format(key, value) for key, value in self.env.items())
if self.args:
result.append('args={0}'.format(' '.join(self.args)))
return ' '.join(result)
def watch(self):
# noinspection PyTypeChecker
return self._output.retrieve()
def remove_output(self, timestamp):
self._output.remove(timestamp)
def close_output(self):
self._output.close()
self._auto_close = True
if not self.running:
self.instance['globals']['processes'].pop(self.name, None)
# noinspection PyUnusedLocal
def process_command(sock, args, instance):
"""Create a process.
You need to specify a name, followed by name=value pairs for the process
options, followed by the command and args to execute. The name must not contain
spaces.
Process options are:
uid - the username or user ID to use when starting the process
gid - the group name or group ID to use when starting the process
working_dir - must be an absolute path if specified
output - size of each output buffer (default is 1m)
You can also specify environment variables by prefixing the name with 'env.' and
rlimits by prefixing the name with 'rlimit.'
Examples:
make process sf uid=1001 gid=2000 working_dir=/sf/bin/ output=1m /sf/bin/uwsgi --ini uwsgi-live.ini --socket fd://27 --stats 127.0.0.1:8090
make process nginx /usr/local/nginx/sbin/nginx
"""
if not args:
raise Error('Process requires a name')
name = args.pop(0)
env = {}
rlimits = {}
kwargs = {}
for key, value in extract_name_value_pairs(args).items():
if key.startswith('env.'):
env[key[4:]] = value
elif key.startswith('rlimit.'):
key = key[7:]
try:
rlimits[getattr(resource, 'RLIMIT_%s' % key.upper())] = int(value)
except AttributeError:
raise utils.Error('Unknown rlimit "%s"' % key)
except ValueError:
raise utils.Error('The rlimit value for "%s" must be an integer, not "%s"' % (key, value))
else:
kwargs[key] = value
watch = int(kwargs.pop('watch', 0))
p = Process(name, args, env, rlimits, instance, **kwargs)
with instance['globals']['lock']:
result = p.spawn()
if watch:
send_with_retry(sock, cast_bytes('{0}\n'.format(result)))
return _do_watch(sock, {name: {'p': result, 't': 0, 'closed': False}}, instance)
return str(result)
# noinspection PyUnusedLocal
def processes_command(sock, args, instance):
"""List active processes.
You can list processes by name or PID
Examples:
list process 3698
list processes nginx.*
"""
instance_globals = instance['globals']
with instance_globals['lock']:
return '\n'.join(sorted('{0}'.format(proc) for _, proc in wildcard_iter(instance_globals['processes'], args)))
# noinspection PyUnusedLocal
def close_output_command(sock, args, instance):
"""Close the process output channels and automatically remove the process from
the list on completion.
You can remove processes by name or PID
Examples:
remove processes uwsgi
remove process 10
"""
"""Close the process output channels and automatically remove the process when done"""
instance_globals = instance['globals']
with instance_globals['lock']:
for name, p in wildcard_iter(instance_globals['processes'], args, required=True):
p.close_output()
def watch_command(sock, args, instance):
"""Watch a process"""
instance_globals = instance['globals']
all_processes = instance_globals['processes']
with instance_globals['lock']:
procs = dict((name, {'p': proc, 't': 0, 'closed': False}) for name, proc in wildcard_iter(all_processes, args, True))
if not procs:
raise utils.Error('Nothing to watch')
send_with_retry(sock, cast_bytes('Watching {0}\n'.format(len(procs))))
return _do_watch(sock, procs, instance)
if hasattr(select, 'poll'):
class Poller(object):
def __init__(self, sock):
self.p = select.poll()
self.p.register(sock.fileno(), select.POLLHUP)
def poll(self, timeout):
return self.p.poll(timeout * 1000)
else:
class Poller(object):
def __init__(self, sock):
self.sock = sock
def poll(self, timeout):
if not select.select([self.sock], [], [], timeout)[0]:
self.sock.setblocking(0)
try:
b = self.sock.recv(1)
if not b:
return True
except socket.error:
pass
self.sock.setblocking(1)
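# Illustrative usage (not part of the original module): both Poller variants answer the
# same question, "did the client hang up within the timeout?", for a hypothetical
# client_sock.
#
#     poller = Poller(client_sock)
#     if poller.poll(0.1):
#         print('client closed the connection')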
def _do_watch(sock, procs, instance):
instance_globals = instance['globals']
all_processes = instance_globals['processes']
connection = instance['connection']
poller = Poller(sock)
delay = .1
while True:
data = []
for name, proc in procs.items():
t, l = proc['p'].watch()
if l:
for item in l:
if item.type == OutputQueue.CLOSED:
data.append(cast_bytes('closed:{0}:{1}:{2}'.format(name, item.timestamp, item.data)))
proc['closed'] = True
else:
data.append(cast_bytes('{0}:{1}:{2}:{3}'.format('out' if item.type == OutputQueue.STDOUT else 'err', name, item.timestamp, len(item.data))))
data.append(item.data)
proc['t'] = t
if data:
delay = .05
data.append(b'] ')
out = b'\n'.join(data)
send_with_retry(sock, out)
one_line = connection.readline().lower()
closed = []
for name, proc in procs.items():
t = proc['t']
if t:
proc['p'].remove_output(t)
if proc['closed']:
closed.append(name)
if closed:
with instance_globals['lock']:
for name in closed:
closed_proc = procs.pop(name, None)
if closed_proc and 'p' in closed_proc:
log.info('Removed process %s', closed_proc['p'])
all_processes.pop(name, None)
if not procs:
return 'Nothing left to watch'
if one_line == 'q':
return 'Stopped watching'
else:
if poller.poll(delay):
return 'Client closed connection'
if delay < 1.0:
delay += .05
|
VideoStream.py
|
import cv2
import threading
import time
class VideoStream:
def __init__(self, src=0, name='VideoStream'):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.name = name
self.stopped = False
def start(self):
# start thread to read frames from video stream
t = threading.Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until thread is stopped
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
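# --- Illustrative usage sketch (not part of the original class) ---
# Assumes a webcam is available at index 0; cv2.imshow/waitKey are used here
# purely for demonstration. The background thread keeps self.frame fresh while
# the main thread reads and displays it.
if __name__ == '__main__':
    vs = VideoStream(src=0).start()
    time.sleep(1.0)  # give the capture thread a moment to grab the first frames
    while True:
        frame = vs.read()
        if frame is None:
            break
        cv2.imshow('VideoStream demo', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vs.stop()
    cv2.destroyAllWindows()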
|
server.py
|
# Developed By 2017 Computer and Communication Department-Alexandria University graduation project team
#
# Email: EITS@gmail.com
#
# Authors: MOHAMED SHERIF,YAMEN EMAD, SHERINE SAMEH
#
# Copyright (c) EITS TEAM 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#=================================================================================
import datetime
import os
import socket
import subprocess
import sys
import threading
from EITS.dbHandler import dbHandler
from EITS import utilities
from EITS import config  # assumed: a config module alongside dbHandler/utilities that defines PORT
HOST = ''
PORT = config.PORT
SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
NUMBEROFTHREADS = 10
CONNECTIONS = {}
DB = dbHandler()
def setupConnection():
try:
SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
SOCKET.bind((HOST, PORT))
SOCKET.listen(NUMBEROFTHREADS)
except socket.error as errorMsg:
print(errorMsg)
setupConnection()
def acceptConnections():
CONNECTIONS.clear()
TYPES = {
'40307': userCommands,
'90901': adminCommands,
'80702': govCommands,
'90201': connectRP
}
while True:
try:
connection, address = SOCKET.accept()
connection.setblocking(1)
except Exception as e:
print(e)
continue
type = str(connection.recv(5))
thread = threading.Thread(target=TYPES[type], kwargs={'connection': connection})
thread.daemon = True
thread.start()
print('====================================================================================\n')
print('Connection has been established | IP: ' + address[0] + ' | Port: ' + str(address[1]) + '\n')
print('====================================================================================\n')
def connectRP(connection):
handshakePck = str(connection.recv(90))
print('Handshaking Packet: \n' + handshakePck + '\n')
print('====================================================================================\n')
specs = handshakePck.split(':_:')
DB.addPi(specs[0],specs[1],specs[2],specs[3],specs[4],specs[5])
mac = specs[1]
CONNECTIONS[mac] = connection
def adminCommands(connection):
AdminID = str(connection.recv(5)).decode("utf-8")
cmd = str(connection.recv(2048)).decode("utf-8")
cmd = cmd.split(":_:")
if cmd[0] == "15151":
mac = cmd[1]
connectionRP = CONNECTIONS[mac]
# Forward the kill command to the target Pi, mirroring the shutdw/restrt branches below.
connectionRP.send('killer')
container = cmd[2]
processNameSize = len(container)
processNameSize = bin(processNameSize)[2:].zfill(32)
connectionRP.send(processNameSize)
connectionRP.send(container)
if cmd[0] == "27351":
mac = cmd[1]
connectionRP = CONNECTIONS[mac]
connectionRP.send('shutdw')
DB.shutPi(AdminID, mac )
if cmd[0] == "87452":
mac = cmd[1]
connectionRP = CONNECTIONS[mac]
connectionRP.send('restrt')
DB.restartPi(AdminID, mac )
def userCommands(connection):
print('A user uploaded a new Dockerfile\n')
print('====================================================================================\n')
mac = DB.getBestPi()
data = str(connection.recv(1024))
print("data is "+data)
path , processName , userID = data.split(":_:")
print("path "+path + " ,processName "+processName)
connectionRP = CONNECTIONS[mac]
connectionRP.send('docker')
utilities.sendFile(connectionRP, path, processName, userID , 'Dockerfile')
response = str(connectionRP.recv(11)).decode('utf-8')
print('\nResponse: ' + response)
if response == 'filecreated':
connectionRP.send('rundocker')
results = str(connectionRP.recv(1024)).decode('utf-8')
print('\nDocker Results: ' + results)
print('====================================================================================\n')
DB.updateResults(userID , processName , results)
def govCommands(connection):
print('An employee uploaded a new criminal\n')
print('====================================================================================\n')
type = str(connection.recv(1)).strip()
#train the set producing the 2 files
os.system('cd ~/Desktop/TF_FILES; python train.py')
if type == "1":
# general --> send to all pi-s with camera
print('\nClassifier.pkl is sent to all the RPs connected with a camera')
macs = DB.getCameraPis()
for x in macs:
print("el mac "+x)
connectionRP = CONNECTIONS[x]
sendToCamera(connectionRP)
if type == "2":
#specific to pi-s in specific locations
locations = str(connection.recv(1024)).strip().split(":_:")
print('\nClassifier.pkl is sent to the RPs connected with a camera at those locations only: \n' + ', '.join(locations))
macs = DB.getLocatedPis(locations)
for x in macs:
connectionRP = CONNECTIONS[x]
sendToCamera(connectionRP)
def sendToCamera(connection):
directory = "/home/yamen/Desktop/TF_FILES/generated-embeddings"
connection.send("upload")
utilities.sendFileCamera(connection , directory , 'classifier.pkl' )
response = connection.recv(4)
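# --- Illustrative client sketch (not part of the original server) ---
# Shows how a user-type client could talk to acceptConnections(): it first
# sends its 5-byte type code ('40307' routes the connection to userCommands)
# and then the ':_:'-separated payload that userCommands() splits into
# path, processName and userID. The host and payload values are assumptions
# for demonstration only; the function is defined but never called here.
def exampleUserClient(host='127.0.0.1', port=PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.send('40307')                          # type code -> userCommands
    client.send('/tmp/job:_:demoProcess:_:1')     # path :_: processName :_: userID
    client.close()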
def main():
setupConnection()
acceptConnections()
if __name__ == '__main__':
main()
|
run-tests.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
if sys.version < '3':
import Queue
else:
import queue as Queue
from distutils.version import LooseVersion
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# Later, add back 2.12 to this list:
# for scala in ["2.11", "2.12"]:
for scala in ["2.11"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
})
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
stderr=per_test_output, stdout=per_test_output, env=env).wait()
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode()
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
per_test_output.close()
LOGGER.info("Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python2.7", "python3.4", "pypy"] if which(x)]
if "python2.7" not in python_execs:
LOGGER.warning("Not testing against `python2.7` because it could not be found; falling"
" back to `python` instead")
python_execs.insert(0, "python")
return python_execs
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"--python-executables", type="string", default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %default)"
)
parser.add_option(
"--modules", type="string",
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %default)"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
parser.add_option(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
def _check_dependencies(python_exec, modules_to_test):
if "COVERAGE_PROCESS_START" in os.environ:
# Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
# If we should test 'pyspark-sql', check whether PyArrow and Pandas are installed
# and explicitly print out the result. See SPARK-23300.
if pyspark_sql in modules_to_test:
# TODO(HyukjinKwon): Relocate and deduplicate these version specifications.
minimum_pyarrow_version = '0.8.0'
minimum_pandas_version = '0.19.2'
try:
pyarrow_version = subprocess_check_output(
[python_exec, "-c", "import pyarrow; print(pyarrow.__version__)"],
universal_newlines=True,
stderr=open(os.devnull, 'w')).strip()
if LooseVersion(pyarrow_version) >= LooseVersion(minimum_pyarrow_version):
LOGGER.info("Will test PyArrow related features against Python executable "
"'%s' in '%s' module." % (python_exec, pyspark_sql.name))
else:
LOGGER.warning(
"Will skip PyArrow related features against Python executable "
"'%s' in '%s' module. PyArrow >= %s is required; however, PyArrow "
"%s was found." % (
python_exec, pyspark_sql.name, minimum_pyarrow_version, pyarrow_version))
except:
LOGGER.warning(
"Will skip PyArrow related features against Python executable "
"'%s' in '%s' module. PyArrow >= %s is required; however, PyArrow "
"was not found." % (python_exec, pyspark_sql.name, minimum_pyarrow_version))
try:
pandas_version = subprocess_check_output(
[python_exec, "-c", "import pandas; print(pandas.__version__)"],
universal_newlines=True,
stderr=open(os.devnull, 'w')).strip()
if LooseVersion(pandas_version) >= LooseVersion(minimum_pandas_version):
LOGGER.info("Will test Pandas related features against Python executable "
"'%s' in '%s' module." % (python_exec, pyspark_sql.name))
else:
LOGGER.warning(
"Will skip Pandas related features against Python executable "
"'%s' in '%s' module. Pandas >= %s is required; however, Pandas "
"%s was found." % (
python_exec, pyspark_sql.name, minimum_pandas_version, pandas_version))
except:
LOGGER.warning(
"Will skip Pandas related features against Python executable "
"'%s' in '%s' module. Pandas >= %s is required; however, Pandas "
"was not found." % (python_exec, pyspark_sql.name, minimum_pandas_version))
def main():
opts = parse_opts()
if (opts.verbose):
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test against the following Python executables: %s", python_execs)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has proper dependencies installed to run tests
# for given modules properly.
_check_dependencies(python_exec, modules_to_test)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
if test_goal in ('pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests'):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
if __name__ == "__main__":
main()
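# --- Illustrative invocations (for reference; run from SPARK_HOME) ---
# The module names come from sparktestsupport.modules, so the exact set may
# differ between Spark versions; the names below are examples, not a
# guaranteed list.
#
#   ./python/run-tests.py
#   ./python/run-tests.py --python-executables=python2.7,pypy
#   ./python/run-tests.py --modules=pyspark-sql --parallelism=8 --verbose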
|
pre_commit_linter.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-commit script for Oppia.
This script lints Python and JavaScript code, and prints a
list of lint errors to the terminal. If the directory path is passed,
it will lint all Python and JavaScript files in that directory; otherwise,
it will only lint files that have been touched in this commit.
This script ignores all filepaths contained within .eslintignore.
=====================
CUSTOMIZATION OPTIONS
=====================
1. To lint only files that have been touched in this commit
python -m scripts.pre_commit_linter
2. To lint all files in the folder or to lint just a specific file
python -m scripts.pre_commit_linter --path filepath
3. To lint a specific list of files (*.js/*.py only). Separate files by spaces
python -m scripts.pre_commit_linter --files file_1 file_2 ... file_n
4. To lint files in verbose mode
python -m scripts.pre_commit_linter --verbose
Note that the root folder MUST be named 'oppia'.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import abc
import argparse
import ast
import collections
import contextlib
import fnmatch
import glob
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
# Install third party dependencies before proceeding.
from . import install_third_party_libs
install_third_party_libs.main()
# pylint: disable=wrong-import-position
import python_utils # isort:skip
_PARSER = argparse.ArgumentParser()
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
'--path',
help='path to the directory with files to be linted',
action='store')
_EXCLUSIVE_GROUP.add_argument(
'--files',
nargs='+',
help='specific files to be linted. Space separated list',
action='store')
_PARSER.add_argument(
'--verbose',
help='verbose mode. All details will be printed.',
action='store_true')
EXCLUDED_PHRASES = [
'utf', 'pylint:', 'http://', 'https://', 'scripts/', 'extract_node']
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif',
'*.png', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/tests/data/*', 'core/tests/build_sources/*',
'*.mp3', '*.mp4', 'node_modules/*', 'typings/*', 'local_compiled_js/*',
'webpack_bundles/*', 'core/tests/services_sources/*',
'core/tests/release_sources/tmp_unzip.zip',
'core/tests/release_sources/tmp_unzip.tar.gz')
GENERATED_FILE_PATHS = (
'extensions/interactions/LogicProof/static/js/generatedDefaultData.ts',
'extensions/interactions/LogicProof/static/js/generatedParser.ts',
'core/templates/dev/head/expressions/expression-parser.service.js')
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/dev/head/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
BAD_PATTERNS = {
'__author__': {
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()},
'datetime.datetime.now()': {
'message': 'Please use datetime.datetime.utcnow() instead of '
'datetime.datetime.now().',
'excluded_files': (),
'excluded_dirs': ()},
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^\w]*$'),
'message': 'Please assign TODO comments to a user '
'in the format TODO(username): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_JS_AND_TS_REGEXP = [
{
'regexp': re.compile(r'\b(browser.explore)\('),
'message': 'In tests, please do not use browser.explore().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.pause)\('),
'message': 'In tests, please do not use browser.pause().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.sleep)\('),
'message': 'In tests, please do not use browser.sleep().',
'excluded_files': (
# TODO(#7622): Remove the file from the excluded list. Remove the
# TODO in core/tests/protractor_desktop/embedding.js pointing to the
# same issue. The following was placed due to a necessary sleep as
# a temporary measure to keep the embedding tests from failing.
'core/tests/protractor_desktop/embedding.js'
),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.waitForAngular)\('),
'message': 'In tests, please do not use browser.waitForAngular().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(ddescribe|fdescribe)\('),
'message': 'In tests, please use \'describe\' instead of \'ddescribe\' '
'or \'fdescribe\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(iit|fit)\('),
'message': 'In tests, please use \'it\' instead of \'iit\' or \'fit\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(beforeEach\(inject\(function)\('),
'message': 'In tests, please use \'angular.mock.inject\' instead of '
'\'inject\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'templateUrl: \''),
'message': 'The directives must be directly referenced.',
'excluded_files': (
'core/templates/dev/head/pages/exploration-player-page/'
'FeedbackPopupDirective.js'
),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/dependencies/',
'extensions/value_generators/',
'extensions/visualizations/')
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'require\(.*\.\..*\);'),
'message': 'Please, don\'t use relative imports in require().',
'excluded_files': (),
'excluded_dirs': ('core/tests/',)
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file contains a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import unicode_literals'),
'message': 'Please ensure this file contains the unicode_literals '
'future import.',
'included_types': ('.py',),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file contains a file '
'overview, i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'\sprint\('),
'message': 'Please use python_utils.PRINT().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'self.assertEquals\('),
'message': 'Please do not use self.assertEquals method. ' +
'This method has been deprecated. Instead use ' +
'self.assertEqual method.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'with open\(|= open\('),
'message': 'Please use python_utils.open_file() instead of open().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'StringIO'),
'message': 'Please use python_utils.string_io() instead of ' +
'import StringIO.',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*quote\('),
'message': 'Please use python_utils.url_quote().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*unquote_plus\('),
'message': 'Please use python_utils.url_unquote_plus().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlencode\('),
'message': 'Please use python_utils.url_encode().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlretrieve\('),
'message': 'Please use python_utils.url_retrieve().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*urlopen\('),
'message': 'Please use python_utils.url_open().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlsplit'),
'message': 'Please use python_utils.url_split().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlparse'),
'message': 'Please use python_utils.url_parse().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlunsplit'),
'message': 'Please use python_utils.url_unsplit().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'parse_qs'),
'message': 'Please use python_utils.parse_query_string().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wunquote\('),
'message': 'Please use python_utils.urllib_unquote().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urljoin'),
'message': 'Please use python_utils.url_join().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*Request\('),
'message': 'Please use python_utils.url_request().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'[^.|\w]input\('),
'message': 'Please use python_utils.INPUT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'[^.|\w|\s]map\('),
'message': 'Please use python_utils.MAP.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wnext\('),
'message': 'Please use python_utils.NEXT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'object\):'),
'message': 'Please use python_utils.OBJECT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wrange\('),
'message': 'Please use python_utils.RANGE.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wround\('),
'message': 'Please use python_utils.ROUND.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wstr\('),
'message': (
'Please try to use python_utils.convert_to_bytes() for the strings '
'used in webapp2\'s built-in methods or for strings used directly '
'in NDB datastore models. If you need to cast ints/floats to '
'strings, please use python_utils.UNICODE() instead.'),
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wzip\('),
'message': 'Please use python_utils.ZIP.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'basestring'),
'message': 'Please use python_utils.BASESTRING.',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'__metaclass__'),
'message': 'Please use python_utils.with_metaclass().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'iteritems'),
'message': 'Please use items() instead.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'itervalues'),
'message': 'Please use values() instead.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'iterkeys'),
'message': 'Please use keys() instead.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_MAP = {
'.js': BAD_PATTERNS_JS_AND_TS_REGEXP,
'.ts': BAD_PATTERNS_JS_AND_TS_REGEXP,
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
REQUIRED_STRINGS_CONSTANTS = {
'DEV_MODE: true': {
'message': 'Please set the DEV_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
}
}
ALLOWED_TERMINATING_PUNCTUATIONS = ['.', '?', '}', ']', ')']
CODEOWNER_FILEPATH = '.github/CODEOWNERS'
# This list needs to be in sync with the important patterns in the CODEOWNERS
# file.
CODEOWNER_IMPORTANT_PATHS = [
'/core/controllers/acl_decorators*.py',
'/core/controllers/base*.py',
'/core/domain/html*.py',
'/core/domain/rights_manager*.py',
'/core/domain/role_services*.py',
'/core/domain/user*.py',
'/core/storage/',
'/export/',
'/manifest.json',
'/package.json',
'/yarn.lock',
'/scripts/install_third_party_libs.py',
'/.github/']
# NOTE TO DEVELOPERS: This should match the version of Node used in common.py.
NODE_DIR = os.path.abspath(
os.path.join(os.getcwd(), os.pardir, 'oppia_tools', 'node-10.18.0'))
if not os.getcwd().endswith('oppia'):
python_utils.PRINT('')
python_utils.PRINT(
'ERROR Please run this script from the oppia root directory.')
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.9.4')
if not os.path.exists(_PYLINT_PATH):
python_utils.PRINT('')
python_utils.PRINT(
'ERROR Please run install_third_party_libs.py first to install pylint')
python_utils.PRINT(' and its dependencies.')
sys.exit(1)
_PATHS_TO_INSERT = [
_PYLINT_PATH,
os.getcwd(),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'webapp2-2.3'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'yaml-3.10'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'jinja2-2.6'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'webtest-2.0.33'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'browsermob-proxy-0.8.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'esprima-4.0.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'pycodestyle-2.5.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-quotes-0.1.8'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'selenium-3.13.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'PyGithub-1.43.7'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'Pillow-6.0.0'),
os.path.join('third_party', 'backports.functools_lru_cache-1.5'),
os.path.join('third_party', 'beautifulsoup4-4.7.1'),
os.path.join('third_party', 'bleach-3.1.0'),
os.path.join('third_party', 'callbacks-0.3.0'),
os.path.join('third_party', 'gae-cloud-storage-1.9.22.1'),
os.path.join('third_party', 'gae-mapreduce-1.9.22.0'),
os.path.join('third_party', 'gae-pipeline-1.9.22.1'),
os.path.join('third_party', 'mutagen-1.42.0'),
os.path.join('third_party', 'soupsieve-1.9.1'),
os.path.join('third_party', 'six-1.12.0'),
os.path.join('third_party', 'webencodings-0.5.1'),
]
for path in _PATHS_TO_INSERT:
sys.path.insert(0, path)
# pylint: disable=wrong-import-order
# pylint: disable=wrong-import-position
import isort # isort:skip
import pycodestyle # isort:skip
import esprima # isort:skip
from pylint import lint # isort:skip
from . import build # isort:skip
from . import docstrings_checker # isort:skip
import html.parser # isort:skip
# pylint: enable=wrong-import-order
# pylint: enable=wrong-import-position
_MESSAGE_TYPE_SUCCESS = 'SUCCESS'
_MESSAGE_TYPE_FAILED = 'FAILED'
_TARGET_STDOUT = python_utils.string_io()
_STDOUT_LIST = multiprocessing.Manager().list()
_FILES = multiprocessing.Manager().dict()
class FileCache(python_utils.OBJECT):
"""Provides thread-safe access to cached file content."""
def __init__(self):
self._CACHE_DATA_DICT = {}
def read(self, filepath, mode='r'):
"""Returns the data read from the file in unicode form.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
str. The data read from the file.
"""
return self._get_data(filepath, mode)[0]
def readlines(self, filepath, mode='r'):
"""Returns the tuple containing data line by line as read from the
file in unicode form.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
tuple(str). The tuple containing data line by line as read from the
file.
"""
return self._get_data(filepath, mode)[1]
def _get_data(self, filepath, mode):
"""Returns the collected data from the file corresponding to the given
filepath.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
tuple(str, tuple(str)). The tuple containing data read from the file
as first element and tuple containing the text line by line as
second element.
"""
key = (filepath, mode)
if key not in self._CACHE_DATA_DICT:
with python_utils.open_file(filepath, mode) as f:
lines = f.readlines()
self._CACHE_DATA_DICT[key] = (''.join(lines), tuple(lines))
return self._CACHE_DATA_DICT[key]
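# --- Illustrative usage sketch ---
# Elsewhere in this script a single module-level instance (referred to as
# FILE_CACHE below, e.g. in _get_all_filepaths and _check_codeowner_file) is
# used so that every lint check reads a given file from disk at most once:
#
#     FILE_CACHE = FileCache()
#     content = FILE_CACHE.read('.github/CODEOWNERS')
#     lines = FILE_CACHE.readlines('.github/CODEOWNERS')
#
# Repeated calls with the same (filepath, mode) pair are served from the
# in-memory cache.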
def _lint_all_files(
js_filepaths, ts_filepaths, py_filepaths, html_filepaths,
css_filepaths, verbose_mode_enabled):
"""This function is used to check if node-eslint dependencies are
installed and pass ESLint binary path and lint all the files(JS, Python,
HTML, CSS) with their respective third party linters.
Args:
js_filepaths: list(str). The list of js filepaths to be linted.
ts_filepaths: list(str). The list of ts filepaths to be linted.
py_filepaths: list(str). The list of python filepaths to be linted.
html_filepaths: list(str). The list of HTML filepaths to be linted.
css_filepaths: list(str). The list of CSS filepaths to be linted.
verbose_mode_enabled: bool. True if verbose mode is enabled.
Returns:
linting_processes: list(multiprocessing.Process). A list of linting
processes.
result_queues: list(multiprocessing.Queue). A list of queues to put
results of tests.
stdout_queues: list(multiprocessing.Queue). A list of queues to store
Stylelint outputs.
"""
python_utils.PRINT('Starting Js, Ts, Python, HTML, and CSS linter...')
pylintrc_path = os.path.join(os.getcwd(), '.pylintrc')
config_pylint = '--rcfile=%s' % pylintrc_path
config_pycodestyle = os.path.join(os.getcwd(), 'tox.ini')
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(NODE_DIR, 'bin', 'node')
eslint_path = os.path.join(
'node_modules', 'eslint', 'bin', 'eslint.js')
stylelint_path = os.path.join(
'node_modules', 'stylelint', 'bin', 'stylelint.js')
config_path_for_css_in_html = os.path.join(
parent_dir, 'oppia', '.stylelintrc')
config_path_for_oppia_css = os.path.join(
parent_dir, 'oppia', 'core', 'templates', 'dev', 'head',
'css', '.stylelintrc')
if not (os.path.exists(eslint_path) and os.path.exists(stylelint_path)):
python_utils.PRINT('')
python_utils.PRINT(
'ERROR Please run start.sh first to install node-eslint ')
python_utils.PRINT(
' or node-stylelint and its dependencies.')
sys.exit(1)
js_and_ts_files_to_lint = js_filepaths + ts_filepaths
linting_processes = []
js_and_ts_result = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_js_and_ts_files, args=(
node_path, eslint_path, js_and_ts_files_to_lint,
js_and_ts_result, verbose_mode_enabled)))
css_in_html_result = multiprocessing.Queue()
css_in_html_stdout = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_css_files, args=(
node_path,
stylelint_path,
config_path_for_css_in_html,
html_filepaths, css_in_html_stdout,
css_in_html_result, verbose_mode_enabled)))
css_result = multiprocessing.Queue()
css_stdout = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_css_files, args=(
node_path,
stylelint_path,
config_path_for_oppia_css,
css_filepaths, css_stdout,
css_result, verbose_mode_enabled)))
py_result = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_py_files,
args=(
config_pylint, config_pycodestyle, py_filepaths,
py_result, verbose_mode_enabled)))
py_result_for_python3_compatibility = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_py_files_for_python3_compatibility,
args=(
py_filepaths, py_result_for_python3_compatibility,
verbose_mode_enabled)))
for process in linting_processes:
process.daemon = False
process.start()
result_queues = [
js_and_ts_result, css_in_html_result, css_result, py_result,
py_result_for_python3_compatibility
]
stdout_queues = [
css_in_html_stdout, css_stdout
]
return linting_processes, result_queues, stdout_queues
def _is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
"""Checks if file is excluded from the bad patterns check.
Args:
pattern: str. The pattern to be checked against.
filepath: str. Path of the file.
Returns:
bool: Whether to exclude the given file from this
particular pattern check.
"""
return (any(filepath.startswith(bad_pattern)
for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])
or filepath in BAD_PATTERNS[pattern]['excluded_files'])
def _get_expression_from_node_if_one_exists(
parsed_node, components_to_check):
"""This function first checks whether the parsed node represents
the required angular component that needs to be derived by checking if
it is in the 'components_to_check' list. If yes, then it will return the
expression part of the node from which the component can be derived.
If no, it will return None. It is done by filtering out
'AssignmentExpression' (as it represents an assignment) and 'Identifier'
(as it represents a static expression).
Args:
parsed_node: dict. Parsed node of the body of a JS file.
components_to_check: list(str). List of angular components to check
in a JS file. These include directives, factories, controllers,
etc.
Returns:
expression: dict or None. Expression part of the node if the node
represents a component else None.
"""
if parsed_node.type != 'ExpressionStatement':
return
# Separate the expression part of the node which is the actual
# content of the node.
expression = parsed_node.expression
# Check whether the expression belongs to a
# 'CallExpression' which always contains a call
# and not an 'AssignmentExpression'.
# For example, func() is a CallExpression.
if expression.type != 'CallExpression':
return
# Check whether the expression belongs to a 'MemberExpression' which
# represents a computed expression or an Identifier which represents
# a static expression.
# For example, 'thing.func' is a MemberExpression where
# 'thing' is the object of the MemberExpression and
# 'func' is the property of the MemberExpression.
# Another example of a MemberExpression within a CallExpression is
# 'thing.func()' where 'thing.func' is the callee of the CallExpression.
if expression.callee.type != 'MemberExpression':
return
# Get the component in the JS file.
component = expression.callee.property.name
if component not in components_to_check:
return
return expression
def _walk_with_gitignore(root, exclude_dirs):
"""A walk function similar to os.walk but this would ignore the files and
directories which is not tracked by git. Also, this will ignore the
directories mentioned in exclude_dirs.
Args:
root: str. The path from where the function should start walking.
exclude_dirs: list(str). A list of directory paths which should be ignored.
Yields:
list(str). A list of unignored files.
"""
dirs, file_paths = [], []
for name in os.listdir(root):
if os.path.isdir(os.path.join(root, name)):
dirs.append(os.path.join(root, name))
else:
file_paths.append(os.path.join(root, name))
yield [file_path for file_path in file_paths if not _is_path_ignored(
file_path)]
for dir_path in dirs:
# Adding "/" in the end of the dir path according to the git dir path
# structure.
if (not _is_path_ignored(dir_path + '/')) and (
dir_path not in exclude_dirs):
for x in _walk_with_gitignore(dir_path, exclude_dirs):
yield x
def _is_path_ignored(path_to_check):
"""Checks whether the given path is ignored by git.
Args:
path_to_check: str. A path to a file or a dir.
Returns:
bool. Whether the given path is ignored by git.
"""
command = ['git', 'check-ignore', '-q', path_to_check]
# The "git check-ignore <path>" command returns 0 when the path is ignored
# otherwise it returns 1. subprocess.call then returns this returncode.
if subprocess.call(command):
return False
else:
return True
def _get_changed_filepaths():
"""Returns a list of modified files (both staged and unstaged)
Returns:
a list of filepaths of modified files.
"""
unstaged_files = subprocess.check_output([
'git', 'diff', '--name-only',
'--diff-filter=ACM']).splitlines()
staged_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only',
'--diff-filter=ACM']).splitlines()
all_changed_filepaths = unstaged_files + staged_files
return [filepath for filepath in all_changed_filepaths]
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
dir_path: str. Path to the folder to be linted.
excluded_glob_patterns: set(str). Set of all glob patterns
to be excluded.
Returns:
a list of files in directory and subdirectories without excluded files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filepath = os.path.relpath(
os.path.join(_dir, file_name), os.getcwd())
if not any([fnmatch.fnmatch(filepath, gp) for gp in
excluded_glob_patterns]):
files_in_directory.append(filepath)
return files_in_directory
@contextlib.contextmanager
def _redirect_stdout(new_target):
"""Redirect stdout to the new target.
Args:
new_target: TextIOWrapper. The new target to which stdout is redirected.
Yields:
TextIOWrapper. The new target.
"""
old_target = sys.stdout
sys.stdout = new_target
try:
yield new_target
finally:
sys.stdout = old_target
def _get_all_filepaths(input_path, input_filenames):
"""This function is used to return the filepaths which needs to be linted
and checked.
Args:
input_path: str. The path of the directory to be linted and checked.
input_filenames: list(str). The list of filenames to be linted and
checked, ignored if input_path is specified.
Returns:
all_filepaths: list(str). The list of filepaths to be linted and
checked.
"""
eslintignore_path = os.path.join(os.getcwd(), '.eslintignore')
if input_path:
input_path = os.path.join(os.getcwd(), input_path)
if not os.path.exists(input_path):
python_utils.PRINT(
'Could not locate file or directory %s. Exiting.' % input_path)
python_utils.PRINT('----------------------------------------')
sys.exit(1)
if os.path.isfile(input_path):
all_filepaths = [input_path]
else:
excluded_glob_patterns = FILE_CACHE.readlines(eslintignore_path)
all_filepaths = _get_all_files_in_directory(
input_path, excluded_glob_patterns)
elif input_filenames:
valid_filepaths = []
invalid_filepaths = []
for filename in input_filenames:
if os.path.isfile(filename):
valid_filepaths.append(filename)
else:
invalid_filepaths.append(filename)
if invalid_filepaths:
python_utils.PRINT(
'The following file(s) do not exist: %s\n'
'Exiting.' % invalid_filepaths)
sys.exit(1)
all_filepaths = valid_filepaths
else:
all_filepaths = _get_changed_filepaths()
all_filepaths = [
filename for filename in all_filepaths if not
any(fnmatch.fnmatch(filename, pattern) for pattern in EXCLUDED_PATHS)]
return all_filepaths
def _check_bad_pattern_in_file(filepath, file_content, pattern):
"""Detects whether the given pattern is present in the file.
Args:
filepath: str. Path of the file.
file_content: str. Contents of the file.
pattern: dict. (regexp(regex pattern) : pattern to match,
message(str) : message to show if pattern matches,
excluded_files(tuple(str)) : files to be excluded from matching,
excluded_dirs(tuple(str)) : directories to be excluded from
matching).
Object containing details for the pattern to be checked.
Returns:
bool. True if there is a bad pattern, else False.
"""
regexp = pattern['regexp']
if not (any(filepath.startswith(excluded_dir)
for excluded_dir in pattern['excluded_dirs'])
or filepath in pattern['excluded_files']):
bad_pattern_count = 0
for line_num, line in enumerate(file_content.split('\n'), 1):
if line.endswith('disable-bad-pattern-check'):
continue
if regexp.search(line):
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num, pattern['message']))
python_utils.PRINT('')
bad_pattern_count += 1
if bad_pattern_count:
return True
return False
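# --- Illustrative example (values assumed for demonstration) ---
# Given one of the pattern dicts defined above, e.g.
#     pattern = {
#         'regexp': re.compile(r'iteritems'),
#         'message': 'Please use items() instead.',
#         'excluded_files': (),
#         'excluded_dirs': (),
#     }
# a file whose line 12 reads "for k, v in d.iteritems():" would make
# _check_bad_pattern_in_file('core/foo.py', file_content, pattern) print
#     core/foo.py --> Line 12: Please use items() instead.
# and return True, unless that line ends with 'disable-bad-pattern-check'.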
def _check_file_type_specific_bad_pattern(filepath, content):
"""Check the file content based on the file's extension.
Args:
filepath: str. Path of the file.
content: str. Contents of the file.
Returns:
failed: bool. True if there is a bad pattern, else False.
total_error_count: int. The number of errors.
"""
_, extension = os.path.splitext(filepath)
pattern = BAD_PATTERNS_MAP.get(extension)
failed = False
total_error_count = 0
if pattern:
for regexp in pattern:
if _check_bad_pattern_in_file(filepath, content, regexp):
failed = True
total_error_count += 1
return failed, total_error_count
class TagMismatchException(Exception):
"""Error class for mismatch between start and end tags."""
pass
class CustomHTMLParser(html.parser.HTMLParser):
"""Custom HTML parser to check indentation."""
def __init__(self, filepath, file_lines, debug, failed=False):
"""Define various variables to parse HTML.
Args:
filepath: str. path of the file.
file_lines: list(str). list of the lines in the file.
debug: bool. if true prints tag_stack for the file.
failed: bool. true if the HTML indentation check fails.
"""
html.parser.HTMLParser.__init__(self)
self.tag_stack = []
self.debug = debug
self.failed = failed
self.filepath = filepath
self.file_lines = file_lines
self.indentation_level = 0
self.indentation_width = 2
self.void_elements = [
'area', 'base', 'br', 'col', 'embed',
'hr', 'img', 'input', 'link', 'meta',
'param', 'source', 'track', 'wbr']
def handle_starttag(self, tag, attrs):
"""Handle start tag of a HTML line.
Args:
tag: str. start tag of a HTML line.
attrs: list(str). list of attributes in the start tag.
"""
line_number, column_number = self.getpos()
# Check the indentation of the tag.
expected_indentation = self.indentation_level * self.indentation_width
tag_line = self.file_lines[line_number - 1].lstrip()
opening_tag = '<' + tag
# Check the indentation for content of style tag.
if tag_line.startswith(opening_tag) and tag == 'style':
# Getting next line after style tag.
next_line = self.file_lines[line_number]
next_line_expected_indentation = (
self.indentation_level + 1) * self.indentation_width
next_line_column_number = len(next_line) - len(next_line.lstrip())
if next_line_column_number != next_line_expected_indentation:
python_utils.PRINT(
'%s --> Expected indentation '
'of %s, found indentation of %s '
'for content of %s tag on line %s ' % (
self.filepath, next_line_expected_indentation,
next_line_column_number, tag, line_number + 1))
python_utils.PRINT('')
self.failed = True
if tag_line.startswith(opening_tag) and (
column_number != expected_indentation):
python_utils.PRINT(
'%s --> Expected indentation '
'of %s, found indentation of %s '
'for %s tag on line %s ' % (
self.filepath, expected_indentation,
column_number, tag, line_number))
python_utils.PRINT('')
self.failed = True
if tag not in self.void_elements:
self.tag_stack.append((tag, line_number, column_number))
self.indentation_level += 1
if self.debug:
python_utils.PRINT('DEBUG MODE: Start tag_stack')
python_utils.PRINT(self.tag_stack)
# Check the indentation of the attributes of the tag.
indentation_of_first_attribute = (
column_number + len(tag) + 2)
starttag_text = self.get_starttag_text()
# Check whether the values of all attributes are placed
# in double quotes.
for attr, value in attrs:
# Not all attributes will have a value.
# Therefore the check should run only for those
# attributes which have a value.
if value:
expected_value = '"' + value + '"'
# " is rendered as a double quote by the parser.
if '"' in starttag_text:
rendered_text = starttag_text.replace('"', '"')
else:
rendered_text = starttag_text
if not expected_value in rendered_text:
self.failed = True
python_utils.PRINT(
'%s --> The value %s of attribute '
'%s for the tag %s on line %s should '
'be enclosed within double quotes.' % (
self.filepath, value, attr,
tag, line_number))
python_utils.PRINT('')
for line_num, line in enumerate(starttag_text.splitlines()):
if line_num == 0:
continue
leading_spaces_count = len(line) - len(line.lstrip())
list_of_attrs = []
for attr, _ in attrs:
list_of_attrs.append(attr)
if not line.lstrip().startswith(tuple(list_of_attrs)):
continue
if indentation_of_first_attribute != leading_spaces_count:
line_num_of_error = line_number + line_num
python_utils.PRINT(
'%s --> Attribute for tag %s on line '
'%s should align with the leftmost '
'attribute on line %s ' % (
self.filepath, tag,
line_num_of_error, line_number))
python_utils.PRINT('')
self.failed = True
def handle_endtag(self, tag):
"""Handle end tag of a HTML line.
Args:
tag: str. end tag of a HTML line.
"""
line_number, _ = self.getpos()
tag_line = self.file_lines[line_number - 1]
leading_spaces_count = len(tag_line) - len(tag_line.lstrip())
try:
last_starttag, last_starttag_line_num, last_starttag_col_num = (
self.tag_stack.pop())
except IndexError:
raise TagMismatchException('Error in line %s of file %s\n' % (
line_number, self.filepath))
if last_starttag != tag:
raise TagMismatchException('Error in line %s of file %s\n' % (
line_number, self.filepath))
if leading_spaces_count != last_starttag_col_num and (
last_starttag_line_num != line_number):
python_utils.PRINT(
'%s --> Indentation for end tag %s on line '
'%s does not match the indentation of the '
'start tag %s on line %s ' % (
self.filepath, tag, line_number,
last_starttag, last_starttag_line_num))
python_utils.PRINT('')
self.failed = True
self.indentation_level -= 1
if self.debug:
python_utils.PRINT('DEBUG MODE: End tag_stack')
python_utils.PRINT(self.tag_stack)
def handle_data(self, data):
"""Handle indentation level.
Args:
data: str. contents of HTML file to be parsed.
"""
data_lines = data.split('\n')
opening_block = tuple(
['{% block', '{% macro', '{% if', '% for', '% if'])
ending_block = tuple(['{% end', '{%- end', '% } %>'])
for data_line in data_lines:
data_line = data_line.lstrip()
if data_line.startswith(opening_block):
self.indentation_level += 1
elif data_line.startswith(ending_block):
self.indentation_level -= 1
def check_for_important_patterns_at_bottom_of_codeowners(important_patterns):
"""Checks that the most important patterns are at the bottom
of the CODEOWNERS file.
Args:
important_patterns: list(str). List of the important
patterns for CODEOWNERS file.
Returns:
bool. Whether the CODEOWNERS "important pattern" check fails.
"""
failed = False
# Check that there are no duplicate elements in the lists.
important_patterns_set = set(important_patterns)
codeowner_important_paths_set = set(CODEOWNER_IMPORTANT_PATHS)
if len(important_patterns_set) != len(important_patterns):
python_utils.PRINT(
'%s --> Duplicate pattern(s) found in critical rules'
' section.' % CODEOWNER_FILEPATH)
failed = True
if len(codeowner_important_paths_set) != len(CODEOWNER_IMPORTANT_PATHS):
python_utils.PRINT(
'scripts/pre_commit_linter.py --> Duplicate pattern(s) found '
'in CODEOWNER_IMPORTANT_PATHS list.')
failed = True
# Check missing rules by set difference operation.
critical_rule_section_minus_list_set = (
important_patterns_set.difference(codeowner_important_paths_set))
list_minus_critical_rule_section_set = (
codeowner_important_paths_set.difference(important_patterns_set))
for rule in critical_rule_section_minus_list_set:
python_utils.PRINT(
'%s --> Rule %s is not present in the '
'CODEOWNER_IMPORTANT_PATHS list in '
'scripts/pre_commit_linter.py. Please add this rule in the '
'mentioned list or remove this rule from the \'Critical files'
'\' section.' % (CODEOWNER_FILEPATH, rule))
failed = True
for rule in list_minus_critical_rule_section_set:
python_utils.PRINT(
'%s --> Rule \'%s\' is not present in the \'Critical files\' '
'section. Please place it under the \'Critical files\' '
'section since it is an important rule. Alternatively please '
'remove it from the \'CODEOWNER_IMPORTANT_PATHS\' list in '
'scripts/pre_commit_linter.py if it is no longer an '
'important rule.' % (CODEOWNER_FILEPATH, rule))
failed = True
return failed
def _check_codeowner_file(verbose_mode_enabled):
"""Checks the CODEOWNERS file for any uncovered dirs/files and also
checks that every pattern in the CODEOWNERS file matches at least one
file/dir. Note that this checks the CODEOWNERS file according to the
glob patterns supported by Python2.7 environment. For more information
please refer https://docs.python.org/2/library/glob.html.
This function also ensures that the most important rules are at the
bottom of the CODEOWNERS file.
"""
if verbose_mode_enabled:
python_utils.PRINT('Starting CODEOWNERS file check')
python_utils.PRINT('----------------------------------------')
with _redirect_stdout(_TARGET_STDOUT):
failed = False
summary_messages = []
# Checks whether every pattern in the CODEOWNERS file matches at
# least one dir/file.
critical_file_section_found = False
important_rules_in_critical_section = []
file_patterns = []
dir_patterns = []
for line_num, line in enumerate(FILE_CACHE.readlines(
CODEOWNER_FILEPATH)):
stripped_line = line.strip()
if '# Critical files' in line:
critical_file_section_found = True
if stripped_line and stripped_line[0] != '#':
if '@' not in line:
python_utils.PRINT(
'%s --> Pattern on line %s doesn\'t have '
'codeowner' % (CODEOWNER_FILEPATH, line_num + 1))
failed = True
else:
# Extract the file pattern from the line.
line_in_concern = line.split('@')[0].strip()
# This is being populated for the important rules
# check.
if critical_file_section_found:
important_rules_in_critical_section.append(
line_in_concern)
# Checks if the path is the full path relative to the
# root oppia directory.
if not line_in_concern.startswith('/'):
python_utils.PRINT(
'%s --> Pattern on line %s is invalid. Use '
'full path relative to the root directory'
% (CODEOWNER_FILEPATH, line_num + 1))
failed = True
# The double asterisks pattern is supported by the
# CODEOWNERS syntax but not the glob in Python 2.
# The following condition checks this.
if '**' in line_in_concern:
python_utils.PRINT(
'%s --> Pattern on line %s is invalid. '
'\'**\' wildcard not allowed' % (
CODEOWNER_FILEPATH, line_num + 1))
failed = True
# Adjustments to the dir paths in CODEOWNERS syntax
# for glob-style patterns to match correctly.
if line_in_concern.endswith('/'):
line_in_concern = line_in_concern[:-1]
# The following condition checks whether the specified
# path exists in the codebase or not. The CODEOWNERS
# syntax has paths starting with '/' which refers to
# full path relative to root, but python glob module
# does not conform to this logic and literally matches
# the '/' character. Therefore the leading '/' has to
# be changed to './' for glob patterns to match
# correctly.
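# For example, an entry of '/core/templates/' (illustrative path) has
# already had its trailing '/' stripped above and now becomes
# './core/templates'.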
line_in_concern = line_in_concern.replace('/', './', 1)
if not glob.glob(line_in_concern):
python_utils.PRINT(
'%s --> Pattern on line %s doesn\'t match '
'any file or directory' % (
CODEOWNER_FILEPATH, line_num + 1))
failed = True
# The following list is being populated with the
# paths in the CODEOWNERS file with the removal of the
# leading '/' to aid in the glob pattern matching in
# the next part of the check wherein the valid patterns
# are used to check if they cover the entire codebase.
if os.path.isdir(line_in_concern):
dir_patterns.append(line_in_concern)
else:
file_patterns.append(line_in_concern)
# Checks that every file (except those under the dir represented by
# the dir_patterns) is covered under CODEOWNERS.
for file_paths in _walk_with_gitignore('.', dir_patterns):
for file_path in file_paths:
match = False
for file_pattern in file_patterns:
if file_path in glob.glob(file_pattern):
match = True
break
if not match:
python_utils.PRINT(
'%s is not listed in the .github/CODEOWNERS file.' % (
file_path))
failed = True
failed = failed or (
check_for_important_patterns_at_bottom_of_codeowners(
important_rules_in_critical_section))
if failed:
summary_message = (
'%s CODEOWNERS file coverage check failed, see messages '
'above for files that need to be added or patterns that need '
'to be fixed.' % _MESSAGE_TYPE_FAILED)
else:
summary_message = '%s CODEOWNERS file coverage check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT(summary_message)
python_utils.PRINT('')
return summary_messages
def _lint_css_files(
node_path, stylelint_path, config_path, files_to_lint, stdout, result,
verbose_mode_enabled):
"""Prints a list of lint errors in the given list of CSS files.
Args:
node_path: str. Path to the node binary.
stylelint_path: str. Path to the Stylelint binary.
config_path: str. Path to the configuration file.
files_to_lint: list(str). A list of filepaths to lint.
stdout: multiprocessing.Queue. A queue to store Stylelint outputs.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
num_files_with_errors = 0
num_css_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT('There are no CSS files to lint.')
return
python_utils.PRINT('Total css files: ', num_css_files)
stylelint_cmd_args = [
node_path, stylelint_path, '--config=' + config_path]
result_list = []
if not verbose_mode_enabled:
python_utils.PRINT('Linting CSS files.')
for _, filepath in enumerate(files_to_lint):
if verbose_mode_enabled:
python_utils.PRINT('Linting: ', filepath)
proc_args = stylelint_cmd_args + [filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
if linter_stderr:
python_utils.PRINT('LINTER FAILED')
python_utils.PRINT(linter_stderr)
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
result_list.append(linter_stdout)
python_utils.PRINT(linter_stdout)
stdout.put(linter_stdout)
if num_files_with_errors:
for error in result_list:
result.put(error)
result.put('%s %s CSS file' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put('%s %s CSS file linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_css_files, time.time() - start_time))
python_utils.PRINT('CSS linting finished.')
def _lint_js_and_ts_files(
node_path, eslint_path, files_to_lint, result, verbose_mode_enabled):
"""Prints a list of lint errors in the given list of JavaScript files.
Args:
node_path: str. Path to the node binary.
eslint_path: str. Path to the ESLint binary.
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
num_files_with_errors = 0
num_js_and_ts_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT(
'There are no JavaScript or Typescript files to lint.')
return
python_utils.PRINT('Total js and ts files: ', num_js_and_ts_files)
eslint_cmd_args = [node_path, eslint_path, '--quiet']
result_list = []
python_utils.PRINT('Linting JS and TS files.')
for _, filepath in enumerate(files_to_lint):
if verbose_mode_enabled:
python_utils.PRINT('Linting: ', filepath)
proc_args = eslint_cmd_args + [filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
if linter_stderr:
python_utils.PRINT('LINTER FAILED')
python_utils.PRINT(linter_stderr)
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
result_list.append(linter_stdout)
if num_files_with_errors:
for error in result_list:
result.put(error)
result.put('%s %s JavaScript and Typescript files' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put(
'%s %s JavaScript and Typescript files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_js_and_ts_files,
time.time() - start_time))
python_utils.PRINT('Js and Ts linting finished.')
def _lint_py_files(
config_pylint, config_pycodestyle, files_to_lint, result,
verbose_mode_enabled):
"""Prints a list of lint errors in the given list of Python files.
Args:
config_pylint: str. Path to the .pylintrc file.
config_pycodestyle: str. Path to the tox.ini file.
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
are_there_errors = False
num_py_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT('There are no Python files to lint.')
return
python_utils.PRINT('Linting %s Python files' % num_py_files)
_batch_size = 50
current_batch_start_index = 0
while current_batch_start_index < len(files_to_lint):
# Note that this index is an exclusive upper bound -- i.e., the current
# batch of files ranges from 'start_index' to 'end_index - 1'.
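# For example, with 120 files and a batch size of 50, the batches
# cover indices 0-49, 50-99 and 100-119.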
current_batch_end_index = min(
current_batch_start_index + _batch_size, len(files_to_lint))
current_files_to_lint = files_to_lint[
current_batch_start_index: current_batch_end_index]
if verbose_mode_enabled:
python_utils.PRINT('Linting Python files %s to %s...' % (
current_batch_start_index + 1, current_batch_end_index))
with _redirect_stdout(_TARGET_STDOUT):
# This line invokes Pylint and prints its output
# to the target stdout.
pylinter = lint.Run(
current_files_to_lint + [config_pylint],
exit=False).linter
# These lines invoke Pycodestyle and print its output
# to the target stdout.
style_guide = pycodestyle.StyleGuide(config_file=config_pycodestyle)
pycodestyle_report = style_guide.check_files(
paths=current_files_to_lint)
if pylinter.msg_status != 0 or pycodestyle_report.get_count() != 0:
result.put(_TARGET_STDOUT.getvalue())
are_there_errors = True
current_batch_start_index = current_batch_end_index
if are_there_errors:
result.put('%s Python linting failed' % _MESSAGE_TYPE_FAILED)
else:
result.put('%s %s Python files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
python_utils.PRINT('Python linting finished.')
def _lint_py_files_for_python3_compatibility(
files_to_lint, result, verbose_mode_enabled):
"""Prints a list of Python 3 compatibility errors in the given list of
Python files.
Args:
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
any_errors = False
files_to_lint_for_python3_compatibility = [
file_name for file_name in files_to_lint if not re.match(
r'^.*python_utils.*\.py$', file_name)]
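# Note: files whose path contains 'python_utils' are skipped above,
# presumably because they intentionally contain Python 2 specific
# compatibility code that the --py3k checks would flag.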
num_py_files = len(files_to_lint_for_python3_compatibility)
if not files_to_lint_for_python3_compatibility:
result.put('')
python_utils.PRINT(
'There are no Python files to lint for Python 3 compatibility.')
return
python_utils.PRINT(
'Linting %s Python files for Python 3 compatibility.' % num_py_files)
_batch_size = 50
current_batch_start_index = 0
while current_batch_start_index < len(
files_to_lint_for_python3_compatibility):
# Note that this index is an exclusive upper bound -- i.e., the current
# batch of files ranges from 'start_index' to 'end_index - 1'.
current_batch_end_index = min(
current_batch_start_index + _batch_size, len(
files_to_lint_for_python3_compatibility))
current_files_to_lint = files_to_lint_for_python3_compatibility[
current_batch_start_index: current_batch_end_index]
if verbose_mode_enabled:
python_utils.PRINT(
'Linting Python files for Python 3 compatibility %s to %s...'
% (current_batch_start_index + 1, current_batch_end_index))
with _redirect_stdout(_TARGET_STDOUT):
# This line invokes Pylint and prints its output
# to the target stdout.
python_utils.PRINT('Messages for Python 3 support:')
pylinter_for_python3 = lint.Run(
current_files_to_lint + ['--py3k'], exit=False).linter
if pylinter_for_python3.msg_status != 0:
result.put(_TARGET_STDOUT.getvalue())
any_errors = True
current_batch_start_index = current_batch_end_index
if any_errors:
result.put(
'%s Python linting for Python 3 compatibility failed'
% _MESSAGE_TYPE_FAILED)
else:
result.put(
'%s %s Python files linted for Python 3 compatibility (%.1f secs)'
% (_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
python_utils.PRINT('Python linting for Python 3 compatibility finished.')
class LintChecksManager( # pylint: disable=inherit-non-class
python_utils.with_metaclass(abc.ABCMeta, python_utils.OBJECT)):
"""Manages all the common linting functions. As an abstract base class, this
is not intended to be used directly.
Attributes:
all_filepaths: list(str). The list of filepaths to be linted.
parsed_js_files: dict. Contains the content of JS files, after
validating and parsing the files.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(self, verbose_mode_enabled=False): # pylint: disable=super-init-not-called
"""Constructs a LintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
# Set path for node.
# The path for node is set explicitly, since otherwise the lint
# tests fail on CircleCI due to the TypeScript files not being
# compilable.
os.environ['PATH'] = '%s/bin:' % NODE_DIR + os.environ['PATH']
self.verbose_mode_enabled = verbose_mode_enabled
self.process_manager = multiprocessing.Manager().dict()
@abc.abstractproperty
def all_filepaths(self):
"""Returns all file paths."""
pass
def _run_multiple_checks(self, *checks):
"""Run multiple checks in parallel."""
processes = []
for check in checks:
p = multiprocessing.Process(target=check)
processes.append(p)
p.start()
for p in processes:
p.join()
def _check_for_mandatory_pattern_in_file(
self, pattern_list, filepath, failed):
"""Checks for a given mandatory pattern in a file.
Args:
pattern_list: list(dict). The list of the mandatory patterns list to
be checked for in the file.
filepath: str. The path to the file to be linted.
failed: bool. Status of failure of the check.
Returns:
bool. The failure status of the check.
"""
# This boolean list keeps track of the regex matches
# found in the file.
pattern_found_list = []
file_content = FILE_CACHE.readlines(filepath)
for index, regexp_to_check in enumerate(
pattern_list):
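# Apply this pattern only to files whose name ends with one of the
# pattern's 'included_types' suffixes and which are not matched by
# 'excluded_files' or 'excluded_dirs'.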
if (any([filepath.endswith(
allowed_type) for allowed_type in (
regexp_to_check['included_types'])]) and (
not any([
filepath.endswith(
pattern) for pattern in (
regexp_to_check[
'excluded_files'] +
regexp_to_check[
'excluded_dirs'])]))):
pattern_found_list.append(index)
for line in file_content:
if regexp_to_check['regexp'].search(line):
pattern_found_list.pop()
break
if pattern_found_list:
failed = True
for pattern_found in pattern_found_list:
python_utils.PRINT('%s --> %s' % (
filepath,
pattern_list[pattern_found]['message']))
return failed
def _check_mandatory_patterns(self):
"""This function checks that all files contain the mandatory
patterns.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting mandatory patterns check')
python_utils.PRINT('----------------------------------------')
summary_messages = []
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
sets_of_patterns_to_match = [
MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
for filepath in self.all_filepaths:
for pattern_list in sets_of_patterns_to_match:
failed = self._check_for_mandatory_pattern_in_file(
pattern_list, filepath, failed)
if failed:
summary_message = (
'%s Mandatory pattern check failed, see errors above for '
'patterns that should be added.' % _MESSAGE_TYPE_FAILED)
else:
summary_message = (
'%s Mandatory pattern check passed' % (
_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
python_utils.PRINT('')
summary_messages.append(summary_message)
self.process_manager['mandatory'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_bad_patterns(self):
"""This function is used for detecting bad patterns."""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting Pattern Checks')
python_utils.PRINT('----------------------------------------')
total_files_checked = 0
total_error_count = 0
summary_messages = []
all_filepaths = [
filepath for filepath in self.all_filepaths if not (
filepath.endswith('pre_commit_linter.py') or
any(
fnmatch.fnmatch(filepath, pattern)
for pattern in EXCLUDED_PATHS)
)]
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in all_filepaths:
file_content = FILE_CACHE.read(filepath)
total_files_checked += 1
for pattern in BAD_PATTERNS:
if (pattern in file_content and
not _is_filepath_excluded_for_bad_patterns_check(
pattern, filepath)):
failed = True
python_utils.PRINT('%s --> %s' % (
filepath, BAD_PATTERNS[pattern]['message']))
python_utils.PRINT('')
total_error_count += 1
for regexp in BAD_PATTERNS_REGEXP:
if _check_bad_pattern_in_file(
filepath, file_content, regexp):
failed = True
total_error_count += 1
temp_failed, temp_count = _check_file_type_specific_bad_pattern(
filepath, file_content)
failed = failed or temp_failed
total_error_count += temp_count
if filepath == 'constants.ts':
for pattern in REQUIRED_STRINGS_CONSTANTS:
if pattern not in file_content:
failed = True
python_utils.PRINT('%s --> %s' % (
filepath,
REQUIRED_STRINGS_CONSTANTS[pattern]['message']))
python_utils.PRINT('')
total_error_count += 1
if failed:
summary_message = (
'%s Pattern check failed, see errors above '
'for patterns that should be removed.' % (
_MESSAGE_TYPE_FAILED))
summary_messages.append(summary_message)
else:
summary_message = '%s Pattern checks passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
if total_files_checked == 0:
python_utils.PRINT('There are no files to be checked.')
else:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
python_utils.PRINT(summary_message)
self.process_manager['bad_pattern'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_patterns(self):
"""Run checks relate to bad patterns."""
methods = [self._check_bad_patterns, self._check_mandatory_patterns]
self._run_multiple_checks(*methods)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: list(str). All the messages returned by the lint checks.
"""
self._check_patterns()
mandatory_patterns_messages = self.process_manager['mandatory']
pattern_messages = self.process_manager['bad_pattern']
return (
mandatory_patterns_messages + pattern_messages)
class JsTsLintChecksManager(LintChecksManager):
"""Manages all the Js and Ts linting functions.
Attributes:
all_filepaths: list(str). The list of filepaths to be linted.
js_filepaths: list(str). The list of js filepaths to be linted.
ts_filepaths: list(str). The list of ts filepaths to be linted.
parsed_js_and_ts_files: dict. Contains the content of JS files, after
validating and parsing the files.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(self, verbose_mode_enabled=False):
"""Constructs a JsTsLintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
os.environ['PATH'] = '%s/bin:' % NODE_DIR + os.environ['PATH']
super(JsTsLintChecksManager, self).__init__(
verbose_mode_enabled=verbose_mode_enabled)
self.parsed_js_and_ts_files = []
self.parsed_expressions_in_files = []
@property
def js_filepaths(self):
"""Return all js filepaths."""
return _FILES['.js']
@property
def ts_filepaths(self):
"""Return all ts filepaths."""
return _FILES['.ts']
@property
def all_filepaths(self):
"""Return all filepaths."""
return self.js_filepaths + self.ts_filepaths
def _validate_and_parse_js_and_ts_files(self):
"""This function validates JavaScript and Typescript files and
returns the parsed contents as a Python dictionary.
Returns:
dict. Contains the contents of js and ts files after
validating and parsing the files.
"""
# Select JS files which need to be checked.
files_to_check = [
filepath for filepath in self.all_filepaths if
not any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
parsed_js_and_ts_files = dict()
if not files_to_check:
return parsed_js_and_ts_files
compiled_js_dir = tempfile.mkdtemp(
dir=os.getcwd(), prefix='tmpcompiledjs')
if not self.verbose_mode_enabled:
python_utils.PRINT('Validating and parsing JS and TS files ...')
for filepath in files_to_check:
if self.verbose_mode_enabled:
python_utils.PRINT(
'Validating and parsing %s file ...' % filepath)
file_content = FILE_CACHE.read(filepath)
try:
# Use esprima to parse a JS or TS file.
parsed_js_and_ts_files[filepath] = esprima.parseScript(
file_content, comment=True)
except Exception as e:
# Compile typescript file which has syntax not valid for JS
# file.
if filepath.endswith('.js'):
shutil.rmtree(compiled_js_dir)
raise Exception(e)
try:
compiled_js_filepath = self._compile_ts_file(
filepath, compiled_js_dir)
file_content = FILE_CACHE.read(compiled_js_filepath)
parsed_js_and_ts_files[filepath] = esprima.parseScript(
file_content)
except Exception as e:
shutil.rmtree(compiled_js_dir)
raise Exception(e)
shutil.rmtree(compiled_js_dir)
return parsed_js_and_ts_files
def _get_expressions_from_parsed_script(self):
"""This function returns the expressions in the script parsed using
js and ts files.
Returns:
dict. Contains the expressions in the script parsed using js
and ts files.
"""
parsed_expressions_in_files = collections.defaultdict(dict)
components_to_check = ['controller', 'directive', 'factory', 'filter']
for filepath, parsed_script in self.parsed_js_and_ts_files.items():
parsed_expressions_in_files[filepath] = collections.defaultdict(
list)
parsed_nodes = parsed_script.body
for parsed_node in parsed_nodes:
for component in components_to_check:
expression = _get_expression_from_node_if_one_exists(
parsed_node, [component])
parsed_expressions_in_files[filepath][component].append(
expression)
return parsed_expressions_in_files
def _compile_ts_file(self, filepath, dir_path):
"""Compiles a typescript file and returns the path for compiled
js file.
"""
allow_js = 'true'
lib = 'es2017,dom'
no_implicit_use_strict = 'true'
skip_lib_check = 'true'
target = 'es5'
type_roots = './node_modules/@types'
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
dir_path, allow_js, lib, no_implicit_use_strict,
skip_lib_check, target, type_roots, filepath)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compiled_js_filepath = os.path.join(
dir_path, os.path.basename(filepath).replace('.ts', '.js'))
return compiled_js_filepath
def _check_extra_js_files(self):
"""Checks if the changes made include extra js files in core
or extensions folder which are not specified in
build.JS_FILEPATHS_NOT_TO_BUILD.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting extra js files check')
python_utils.PRINT('----------------------------------------')
summary_messages = []
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
js_files_to_check = self.js_filepaths
for filepath in js_files_to_check:
if filepath.startswith(('core/templates', 'extensions')) and (
filepath not in build.JS_FILEPATHS_NOT_TO_BUILD) and (
not filepath.endswith('protractor.js')):
python_utils.PRINT(
'%s --> Found extra .js file\n' % filepath)
failed = True
if failed:
err_msg = (
'If you want the above files to be present as js files, '
'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
'build.py. Otherwise, rename them to .ts\n')
python_utils.PRINT(err_msg)
if failed:
summary_message = (
'%s Extra JS files check failed, see '
'message above on resolution steps.' % (
_MESSAGE_TYPE_FAILED))
else:
summary_message = '%s Extra JS files check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT(summary_message)
python_utils.PRINT('')
self.process_manager['extra'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_js_and_ts_component_name_and_count(self):
"""This function ensures that all JS/TS files have exactly
one component and that the name of the component
matches the filename.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting js component name and count check')
python_utils.PRINT('----------------------------------------')
# Select JS files which need to be checked.
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)
and (not filepath.endswith('App.ts'))]
failed = False
summary_messages = []
components_to_check = ['controller', 'directive', 'factory', 'filter']
stdout = python_utils.string_io()
for filepath in files_to_check:
component_num = 0
parsed_expressions = self.parsed_expressions_in_files[filepath]
with _redirect_stdout(stdout):
for component in components_to_check:
if component_num > 1:
break
for expression in parsed_expressions[component]:
if not expression:
continue
component_num += 1
# Check if the number of components in each file exceeds
# one.
if component_num > 1:
python_utils.PRINT(
'%s -> Please ensure that there is exactly one '
'component in the file.' % (filepath))
failed = True
break
with _redirect_stdout(stdout):
if failed:
summary_message = (
'%s JS and TS Component name and count check failed, '
'see messages above for duplicate names.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s JS and TS Component name and count check passed' %
(_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['component'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_directive_scope(self):
"""This function checks that all directives have an explicit
scope: {} and it should not be scope: true.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting directive scope check')
python_utils.PRINT('----------------------------------------')
# Select JS and TS files which need to be checked.
files_to_check = [
filepath for filepath in self.all_filepaths if
not any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
components_to_check = ['directive']
stdout = python_utils.string_io()
for filepath in files_to_check:
parsed_expressions = self.parsed_expressions_in_files[filepath]
with _redirect_stdout(stdout):
# Parse the body of the content as nodes.
for component in components_to_check:
for expression in parsed_expressions[component]:
if not expression:
continue
# Separate the arguments of the expression.
arguments = expression.arguments
# The first argument of the expression is the
# name of the directive.
if arguments[0].type == 'Literal':
directive_name = str(arguments[0].value)
arguments = arguments[1:]
for argument in arguments:
# Check the type of an argument.
if argument.type != 'ArrayExpression':
continue
# Separate out the elements for the argument.
elements = argument.elements
for element in elements:
# Check the type of an element.
if element.type != 'FunctionExpression':
continue
# Separate out the body of the element.
body = element.body
if body.type != 'BlockStatement':
continue
# Further separate the body elements from the
# body.
body_elements = body.body
for body_element in body_elements:
# Check if the body element is a return
# statement.
body_element_type_is_not_return = (
body_element.type != 'ReturnStatement')
body_element_arg_type_is_not_object = (
body_element.argument.type != (
'ObjectExpression'))
if (
body_element_arg_type_is_not_object
or (
body_element_type_is_not_return
)):
continue
# Separate the properties of the return
# node.
return_node_properties = (
body_element.argument.properties)
# Loop over all the properties of the return
# node to find out the scope key.
for return_node_property in (
return_node_properties):
# Check whether the property is scope.
property_key_is_an_identifier = (
return_node_property.key.type == (
'Identifier'))
property_key_name_is_scope = (
return_node_property.key.name == (
'scope'))
if (
property_key_is_an_identifier
and (
property_key_name_is_scope
)):
# Separate the scope value and
# check if it is an Object
# Expression. If it is not, then
# check for scope: true and report
# the error message.
scope_value = (
return_node_property.value)
if (
scope_value.type == (
'Literal')
and (
scope_value.value)):
failed = True
python_utils.PRINT(
'Please ensure that %s '
'directive in %s file '
'does not have scope set '
'to true.' %
(directive_name, filepath))
python_utils.PRINT('')
elif scope_value.type != (
'ObjectExpression'):
# Check whether the directive
# has scope: {} else report
# the error message.
failed = True
python_utils.PRINT(
'Please ensure that %s '
'directive in %s file has '
'a scope: {}.' % (
directive_name, filepath
))
python_utils.PRINT('')
with _redirect_stdout(stdout):
if failed:
summary_message = (
'%s Directive scope check failed, '
'see messages above for suggested fixes.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = '%s Directive scope check passed' % (
_MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['directive'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_sorted_dependencies(self):
"""This function checks that the dependencies which are
imported in the controllers/directives/factories in JS
files are in following pattern: dollar imports, regular
imports, and constant imports, all in sorted order.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting sorted dependencies check')
python_utils.PRINT('----------------------------------------')
files_to_check = [
filepath for filepath in self.all_filepaths if
not any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
components_to_check = ['controller', 'directive', 'factory']
failed = False
summary_messages = []
stdout = python_utils.string_io()
for filepath in files_to_check:
parsed_expressions = self.parsed_expressions_in_files[filepath]
with _redirect_stdout(stdout):
for component in components_to_check:
for expression in parsed_expressions[component]:
if not expression:
continue
# Separate the arguments of the expression.
arguments = expression.arguments
if arguments[0].type == 'Literal':
property_value = str(arguments[0].value)
arguments = arguments[1:]
for argument in arguments:
if argument.type != 'ArrayExpression':
continue
literal_args = []
function_args = []
dollar_imports = []
regular_imports = []
constant_imports = []
elements = argument.elements
for element in elements:
if element.type == 'Literal':
literal_args.append(str(element.value))
elif element.type == 'FunctionExpression':
func_args = element.params
for func_arg in func_args:
function_args.append(str(func_arg.name))
for arg in function_args:
if arg.startswith('$'):
dollar_imports.append(arg)
elif re.search('[a-z]', arg):
regular_imports.append(arg)
else:
constant_imports.append(arg)
dollar_imports.sort()
regular_imports.sort()
constant_imports.sort()
sorted_imports = (
dollar_imports + regular_imports + (
constant_imports))
if sorted_imports != function_args:
failed = True
python_utils.PRINT(
'Please ensure that in %s in file %s, the '
'injected dependencies should be in the '
'following manner: dollar imports, regular '
'imports and constant imports, all in '
'sorted order.'
% (property_value, filepath))
if sorted_imports != literal_args:
failed = True
python_utils.PRINT(
'Please ensure that in %s in file %s, the '
'stringified dependencies should be in the '
'following manner: dollar imports, regular '
'imports and constant imports, all in '
'sorted order.'
% (property_value, filepath))
with _redirect_stdout(stdout):
if failed:
summary_message = (
'%s Sorted dependencies check failed, fix files '
'that don\'t have sorted dependencies mentioned above.' % (
_MESSAGE_TYPE_FAILED))
else:
summary_message = (
'%s Sorted dependencies check passed' % (
_MESSAGE_TYPE_SUCCESS))
summary_messages.append(summary_message)
python_utils.PRINT('')
python_utils.PRINT(summary_message)
if self.verbose_mode_enabled:
python_utils.PRINT('----------------------------------------')
self.process_manager['sorted'] = summary_messages
_STDOUT_LIST.append(stdout)
def _match_line_breaks_in_controller_dependencies(self):
"""This function checks whether the line breaks between the dependencies
listed in the controller of a directive or service exactly match those
between the arguments of the controller function.
"""
if self.verbose_mode_enabled:
python_utils.PRINT(
'Starting controller dependency line break check')
python_utils.PRINT('----------------------------------------')
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
# For RegExp explanation, please see https://regex101.com/r/T85GWZ/2/.
pattern_to_match = (
r'controller.* \[(?P<stringfied_dependencies>[\S\s]*?)' +
r'function\((?P<function_parameters>[\S\s]*?)\)')
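# Illustrative snippet (hypothetical) that the pattern above matches:
#
#     controller: ['$scope', 'UrlService',
#         function($scope, UrlService) {...}]
#
# The check below requires the line-break layout (and the names) of the
# quoted dependency list to exactly match that of the function
# parameters.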
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
file_content = FILE_CACHE.read(filepath)
matched_patterns = re.findall(pattern_to_match, file_content)
for matched_pattern in matched_patterns:
stringfied_dependencies, function_parameters = (
matched_pattern)
stringfied_dependencies = (
stringfied_dependencies.strip().replace(
'\'', '').replace(' ', ''))[:-1]
function_parameters = (
function_parameters.strip().replace(' ', ''))
if stringfied_dependencies != function_parameters:
failed = True
python_utils.PRINT(
'Please ensure that in file %s the line breaks '
'pattern between the dependencies mentioned as '
'strings:\n[%s]\nand the dependencies mentioned '
'as function parameters: \n(%s)\nfor the '
'corresponding controller should '
'exactly match.' % (
filepath, stringfied_dependencies,
function_parameters))
python_utils.PRINT('')
if failed:
summary_message = (
'%s Controller dependency line break check failed, '
'see messages above for the affected files.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Controller dependency line break check passed' % (
_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['line_breaks'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_constants_declaration(self):
"""Checks the declaration of constants in the TS files to ensure that
the constants are not declared in files other than *.constants.ajs.ts
and that the constants are declared only a single time. This also checks
that the constants are declared in both *.constants.ajs.ts (for
AngularJS) and in *.constants.ts (for Angular 8).
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting constants declaration check')
python_utils.PRINT('----------------------------------------')
summary_messages = []
failed = False
with _redirect_stdout(_TARGET_STDOUT):
ts_files_to_check = self.ts_filepaths
constants_to_source_filepaths_dict = {}
angularjs_source_filepaths_to_constants_dict = {}
for filepath in ts_files_to_check:
# The following block extracts the corresponding Angularjs
# constants file for the Angular constants file. This is
# required since the check cannot proceed if the AngularJS
# constants file is not provided before the Angular constants
# file.
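# For example (illustrative filename), 'app.constants.ts' maps to
# 'app.constants.ajs.ts'.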
if filepath.endswith('.constants.ts'):
filename_without_extension = filepath[:-3]
corresponding_angularjs_filepath = (
filename_without_extension + '.ajs.ts')
compiled_js_dir = tempfile.mkdtemp(dir=os.getcwd())
try:
if os.path.isfile(corresponding_angularjs_filepath):
compiled_js_filepath = self._compile_ts_file(
corresponding_angularjs_filepath,
compiled_js_dir)
file_content = FILE_CACHE.read(
compiled_js_filepath).decode('utf-8')
parsed_script = esprima.parseScript(file_content)
parsed_nodes = parsed_script.body
angularjs_constants_list = []
components_to_check = ['constant']
for parsed_node in parsed_nodes:
expression = (
_get_expression_from_node_if_one_exists(
parsed_node, components_to_check))
if not expression:
continue
else:
# The following block populates a set to
# store constants for the Angular-AngularJS
# constants file consistency check.
angularjs_constants_name = (
expression.arguments[0].value)
angularjs_constants_value = (
expression.arguments[1].property.name)
if angularjs_constants_value != (
angularjs_constants_name):
failed = True
python_utils.PRINT(
'%s --> Please ensure that the '
'constant %s is initialized '
'from the value from the '
'corresponding Angular constants'
' file (the *.constants.ts '
'file). Please create one in the'
' Angular constants file if it '
'does not exist there.' % (
filepath,
angularjs_constants_name))
angularjs_constants_list.append(
angularjs_constants_name)
angularjs_constants_set = set(
angularjs_constants_list)
if len(angularjs_constants_set) != len(
angularjs_constants_list):
failed = True
python_utils.PRINT(
'%s --> Duplicate constant declaration '
'found.' % (
corresponding_angularjs_filepath))
angularjs_source_filepaths_to_constants_dict[
corresponding_angularjs_filepath] = (
angularjs_constants_set)
else:
failed = True
python_utils.PRINT(
'%s --> Corresponding AngularJS constants '
'file not found.' % filepath)
finally:
shutil.rmtree(compiled_js_dir)
# Check that the constants are declared only in a
# *.constants.ajs.ts file.
if not filepath.endswith('.constants.ajs.ts'):
for line_num, line in enumerate(FILE_CACHE.readlines(
filepath)):
if 'oppia.constant(' in line:
failed = True
python_utils.PRINT(
'%s --> Constant declaration found at line '
'%s. Please declare the constants in a '
'separate constants file.' % (
filepath, line_num))
# Check if the constant has multiple declarations which is
# prohibited.
parsed_script = self.parsed_js_and_ts_files[filepath]
parsed_nodes = parsed_script.body
components_to_check = ['constant']
angular_constants_list = []
for parsed_node in parsed_nodes:
expression = _get_expression_from_node_if_one_exists(
parsed_node, components_to_check)
if not expression:
continue
else:
constant_name = expression.arguments[0].raw
if constant_name in constants_to_source_filepaths_dict:
failed = True
python_utils.PRINT(
'%s --> The constant %s is already declared '
'in %s. Please import the file where the '
'constant is declared or rename the constant'
'.' % (
filepath, constant_name,
constants_to_source_filepaths_dict[
constant_name]))
else:
constants_to_source_filepaths_dict[
constant_name] = filepath
# Checks that the *.constants.ts and the corresponding
# *.constants.ajs.ts file are in sync.
if filepath.endswith('.constants.ts'):
angular_constants_nodes = (
parsed_nodes[1].declarations[0].init.callee.body.body)
for angular_constant_node in angular_constants_nodes:
if not angular_constant_node.expression:
continue
angular_constant_name = (
angular_constant_node.expression.left.property.name)
angular_constants_list.append(angular_constant_name)
angular_constants_set = set(angular_constants_list)
if len(angular_constants_set) != len(
angular_constants_list):
failed = True
python_utils.PRINT(
'%s --> Duplicate constant declaration found.'
% filepath)
if corresponding_angularjs_filepath in (
angularjs_source_filepaths_to_constants_dict):
angular_minus_angularjs_constants = (
angular_constants_set.difference(
angularjs_source_filepaths_to_constants_dict[
corresponding_angularjs_filepath]))
for constant in angular_minus_angularjs_constants:
failed = True
python_utils.PRINT(
'%s --> The constant %s is not declared '
'in the corresponding angularjs '
'constants file.' % (filepath, constant))
if failed:
summary_message = (
'%s Constants declaration check failed, '
'see messages above for constants with errors.' % (
_MESSAGE_TYPE_FAILED))
else:
summary_message = '%s Constants declaration check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT(summary_message)
return summary_messages
def _check_dependencies(self):
"""Check the dependencies related issues. This runs
_check_sorted_dependencies and
_match_line_breaks_in_controller_dependencies
in parallel.
"""
methods = [
self._check_sorted_dependencies,
self._match_line_breaks_in_controller_dependencies
]
super(JsTsLintChecksManager, self)._run_multiple_checks(*methods)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: list(str). All the messages returned by the lint checks.
"""
self.parsed_js_and_ts_files = self._validate_and_parse_js_and_ts_files()
self.parsed_expressions_in_files = (
self._get_expressions_from_parsed_script())
common_messages = super(
JsTsLintChecksManager, self).perform_all_lint_checks()
super(JsTsLintChecksManager, self)._run_multiple_checks(
self._check_extra_js_files,
self._check_js_and_ts_component_name_and_count,
self._check_directive_scope
)
self._check_dependencies()
extra_js_files_messages = self.process_manager['extra']
js_and_ts_component_messages = self.process_manager['component']
directive_scope_messages = self.process_manager['directive']
sorted_dependencies_messages = self.process_manager['sorted']
controller_dependency_messages = self.process_manager['line_breaks']
all_messages = (
common_messages + extra_js_files_messages +
js_and_ts_component_messages + directive_scope_messages +
sorted_dependencies_messages + controller_dependency_messages)
return all_messages
def _check_html_directive_name(self):
"""This function checks that all HTML directives end
with _directive.html.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML directive name check')
python_utils.PRINT('----------------------------------------')
total_files_checked = 0
total_error_count = 0
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
# For RegExp explanation, please see https://regex101.com/r/gU7oT6/37.
pattern_to_match = (
r'templateUrl: UrlInterpolationService\.[A-z\(]+' +
r'(?P<directive_name>[^\)]+)')
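# Illustrative match (hypothetical call) for the pattern above:
#
#     templateUrl: UrlInterpolationService.getDirectiveTemplateUrl(
#         '/pages/foo/bar_directive.html')
#
# The captured group resolves to the quoted template path, which must
# end with '_directive.html'.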
with _redirect_stdout(_TARGET_STDOUT):
for filepath in files_to_check:
file_content = FILE_CACHE.read(filepath)
total_files_checked += 1
matched_patterns = re.findall(pattern_to_match, file_content)
for matched_pattern in matched_patterns:
matched_pattern = matched_pattern.split()
directive_filepath = ''.join(matched_pattern).replace(
'\'', '').replace('+', '')
if not directive_filepath.endswith('_directive.html'):
failed = True
total_error_count += 1
python_utils.PRINT(
'%s --> Please ensure that this file ends '
'with _directive.html.' % directive_filepath)
python_utils.PRINT('')
if failed:
summary_message = (
'%s HTML directive name check failed, see files above '
'that did not end with _directive.html but '
'should have.' % _MESSAGE_TYPE_FAILED)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML directive name check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
if total_files_checked == 0:
if self.verbose_mode_enabled:
python_utils.PRINT('There are no files to be checked.')
else:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
python_utils.PRINT(summary_message)
return summary_messages
class OtherLintChecksManager(LintChecksManager):
"""Manages all the linting functions except the ones against Js and Ts. It
checks Python, CSS, and HTML files.
Attributes:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(
self, verbose_mode_enabled=False):
"""Constructs a OtherLintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
super(OtherLintChecksManager, self).__init__(
verbose_mode_enabled=verbose_mode_enabled)
@property
def py_filepaths(self):
"""Return all python filepaths."""
return _FILES['.py']
@property
def html_filepaths(self):
"""Return all html filepaths."""
return _FILES['.html']
@property
def other_filepaths(self):
"""Return other filepaths."""
return _FILES['other']
@property
def css_filepaths(self):
"""Return css filepaths."""
return _FILES['.css']
@property
def all_filepaths(self):
"""Return all filepaths."""
return (
self.css_filepaths + self.html_filepaths +
self.other_filepaths + self.py_filepaths)
def _check_import_order(self):
"""This function is used to check that each file
has imports placed in alphabetical order.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting import-order checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
# This call prints the error message along with the file path
# and returns True if it finds an error, else returns False.
# If check is set to True, isort simply checks the file;
# if check is set to False, it autocorrects import-order errors.
if (isort.SortImports(
filepath, check=True, show_diff=(
True)).incorrectly_sorted):
failed = True
python_utils.PRINT('')
python_utils.PRINT('')
if failed:
summary_message = (
'%s Import order checks failed, file imports should be '
'alphabetized, see affected files above.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Import order checks passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
self.process_manager['import'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_import(self):
"""Run checks relates to import order."""
methods = [self._check_import_order]
super(OtherLintChecksManager, self)._run_multiple_checks(*methods)
def _check_docstring(self):
"""Run checks related to docstring."""
methods = [self._check_docstrings]
super(OtherLintChecksManager, self)._run_multiple_checks(*methods)
def _check_docstrings(self):
"""This function ensures that docstrings end in a period and the arg
order in the function definition matches the order in the doc string.
Returns:
summary_messages: list(str). Summary of messages generated by the
check.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting docstring checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
missing_period_message = (
'There should be a period at the end of the docstring.')
multiline_docstring_message = (
'Multiline docstring should end with a new line.')
single_line_docstring_message = (
'Single line docstring should not span two lines. '
'If line length exceeds 80 characters, '
'convert the single line docstring to a multiline docstring.')
previous_line_message = (
'There should not be any empty lines before the end of '
'the multi-line docstring.')
space_after_triple_quotes_in_docstring_message = (
'There should be no space after """ in docstring.')
failed = False
is_docstring = False
is_class_or_function = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
file_content = FILE_CACHE.readlines(filepath)
file_length = len(file_content)
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].strip()
prev_line = ''
if line_num > 0:
prev_line = file_content[line_num - 1].strip()
# Check if it is a docstring and not some multi-line string.
if (prev_line.startswith('class ') or
prev_line.startswith('def ')) or (
is_class_or_function):
is_class_or_function = True
if prev_line.endswith('):') and (
line.startswith('"""')):
is_docstring = True
is_class_or_function = False
# Check for space after """ in docstring.
if re.match(r'^""".+$', line) and is_docstring and (
line[3] == ' '):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1,
space_after_triple_quotes_in_docstring_message))
python_utils.PRINT('')
is_docstring = False
# Check if single line docstring span two lines.
if line == '"""' and prev_line.startswith('"""') and (
is_docstring):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num, single_line_docstring_message))
python_utils.PRINT('')
is_docstring = False
# Check for single line docstring.
elif re.match(r'^""".+"""$', line) and is_docstring:
# Check for punctuation at line[-4] since last three
# characters are double quotes.
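# For example, in '"""Returns the sum."""' the character at
# index -4 is the terminating '.'.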
if (len(line) > 6) and (
line[-4] not in
ALLOWED_TERMINATING_PUNCTUATIONS):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1, missing_period_message))
python_utils.PRINT('')
is_docstring = False
# Check for multiline docstring.
elif line.endswith('"""') and is_docstring:
# Case 1: line is """. This is correct for multiline
# docstring.
if line == '"""':
# Check for empty line before the end of docstring.
if prev_line == '':
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num, previous_line_message))
python_utils.PRINT('')
# Check for punctuation at end of docstring.
else:
last_char_is_invalid = prev_line[-1] not in (
ALLOWED_TERMINATING_PUNCTUATIONS)
no_word_is_present_in_excluded_phrases = (
not any(
word in prev_line for word in (
EXCLUDED_PHRASES)))
if last_char_is_invalid and (
no_word_is_present_in_excluded_phrases):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num,
missing_period_message))
python_utils.PRINT('')
# Case 2: line contains some words before """. """
# should shift to next line.
elif not any(word in line for word in EXCLUDED_PHRASES):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1,
multiline_docstring_message))
python_utils.PRINT('')
is_docstring = False
docstring_checker = docstrings_checker.ASTDocStringChecker()
for filepath in files_to_check:
ast_file = ast.walk(
ast.parse(
python_utils.convert_to_bytes(
FILE_CACHE.read(filepath))))
func_defs = [n for n in ast_file if isinstance(
n, ast.FunctionDef)]
for func in func_defs:
# Check that the args in the docstring are listed in the
# same order as they appear in the function definition.
func_result = docstring_checker.check_docstrings_arg_order(
func)
for error_line in func_result:
python_utils.PRINT('%s --> Func %s: %s' % (
filepath, func.name, error_line))
python_utils.PRINT('')
failed = True
python_utils.PRINT('')
if failed:
summary_message = (
'%s Docstring check failed, see files above with bad '
'docstrings to be fixed.' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Docstring check passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
self.process_manager['docstrings'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_html_tags_and_attributes(self, debug=False):
"""This function checks the indentation of lines in HTML files."""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML tag and attribute check')
python_utils.PRINT('----------------------------------------')
html_files_to_lint = self.html_filepaths
failed = False
summary_messages = []
with _redirect_stdout(_TARGET_STDOUT):
for filepath in html_files_to_lint:
file_content = FILE_CACHE.read(filepath)
file_lines = FILE_CACHE.readlines(filepath)
parser = CustomHTMLParser(filepath, file_lines, debug)
parser.feed(file_content)
if len(parser.tag_stack) != 0:
raise TagMismatchException('Error in file %s\n' % filepath)
if parser.failed:
failed = True
if failed:
summary_message = (
'%s HTML tag and attribute check failed, fix the HTML '
'files listed above.' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML tag and attribute check passed' % (
_MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
return summary_messages
def _lint_html_files(self):
"""This function is used to check HTML files for linting errors."""
node_path = os.path.join(NODE_DIR, 'bin', 'node')
htmllint_path = os.path.join(
'node_modules', 'htmllint-cli', 'bin', 'cli.js')
error_summary = []
total_error_count = 0
summary_messages = []
htmllint_cmd_args = [node_path, htmllint_path, '--rc=.htmllintrc']
html_files_to_lint = self.html_filepaths
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML linter...')
python_utils.PRINT('----------------------------------------')
python_utils.PRINT('')
if not self.verbose_mode_enabled:
python_utils.PRINT('Linting HTML files.')
for filepath in html_files_to_lint:
proc_args = htmllint_cmd_args + [filepath]
if self.verbose_mode_enabled:
python_utils.PRINT('Linting %s file' % filepath)
with _redirect_stdout(_TARGET_STDOUT):
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, _ = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
# This line splits the output of the linter and extracts digits
# from it. The digits are stored in a list. The second last
# digit in the list represents the number of errors in the file.
error_count = (
[int(s) for s in linter_stdout.split() if s.isdigit()][-2])
if error_count:
error_summary.append(error_count)
python_utils.PRINT(linter_stdout)
with _redirect_stdout(_TARGET_STDOUT):
if self.verbose_mode_enabled:
python_utils.PRINT('----------------------------------------')
for error_count in error_summary:
total_error_count += error_count
total_files_checked = len(html_files_to_lint)
if total_error_count:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
summary_message = (
'%s HTML linting failed, '
'fix the HTML files listed above.' % _MESSAGE_TYPE_FAILED)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML linting passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
python_utils.PRINT(summary_message)
python_utils.PRINT('HTML linting finished.')
python_utils.PRINT('')
return summary_messages
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: str. All the messages returned by the lint checks.
"""
common_messages = super(
OtherLintChecksManager, self).perform_all_lint_checks()
# division_operator_messages = self._check_division_operator()
# import_order_messages = self._check_import_order()
self._check_import()
self._check_docstring()
docstring_messages = self.process_manager['docstrings']
# The html tags and attributes check has an additional
# debug mode which when enabled prints the tag_stack for each file.
html_tag_and_attribute_messages = (
self._check_html_tags_and_attributes())
html_linter_messages = self._lint_html_files()
import_order_messages = self.process_manager['import']
all_messages = (
import_order_messages + common_messages +
docstring_messages + html_tag_and_attribute_messages +
html_linter_messages)
return all_messages
def _print_complete_summary_of_errors():
"""Print complete summary of errors."""
error_messages = _TARGET_STDOUT.getvalue()
piped_messages = ''.join([x.getvalue() for x in _STDOUT_LIST])
error_messages += piped_messages
if error_messages != '':
python_utils.PRINT('Summary of Errors:')
python_utils.PRINT('----------------------------------------')
python_utils.PRINT(error_messages)
def read_files(file_paths):
"""Read all files to be checked and cache them. This will spin off multiple
threads to increase the efficiency.
"""
threads = []
for file_path in file_paths:
thread = threading.Thread(target=FILE_CACHE.read, args=(file_path,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def categorize_files(file_paths):
"""Categorize all the files and store them in shared variable _FILES."""
all_filepaths_dict = {
'.py': [], '.html': [], '.ts': [], '.js': [], 'other': [], '.css': []
}
for file_path in file_paths:
_, extension = os.path.splitext(file_path)
if extension in all_filepaths_dict:
all_filepaths_dict[extension].append(file_path)
else:
all_filepaths_dict['other'].append(file_path)
_FILES.update(all_filepaths_dict)
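# For example (sketch): categorize_files(['core/foo.py', 'core/bar.html', 'README.md'])
# would place 'core/foo.py' under '.py', 'core/bar.html' under '.html', and
# 'README.md' under 'other' in the shared _FILES dict.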
def _join_linting_process(linting_processes, result_queues, result_stdouts):
"""Join process spawn off by _lint_all_files and capture the outputs."""
for process in linting_processes:
process.join()
summary_messages = []
for result_queue in result_queues:
while not result_queue.empty():
summary_messages.append(result_queue.get())
for result_stdout in result_stdouts:
while not result_stdout.empty():
summary_messages.append(result_stdout.get())
with _redirect_stdout(_TARGET_STDOUT):
python_utils.PRINT(b'\n'.join(summary_messages))
python_utils.PRINT('')
python_utils.PRINT('')
return summary_messages
def main(args=None):
"""Main method for pre commit linter script that lints Python, JavaScript,
HTML, and CSS files.
"""
parsed_args = _PARSER.parse_args(args=args)
    # Default mode is non-verbose. If the arguments contain the --verbose flag,
    # verbose mode is enabled.
verbose_mode_enabled = bool(parsed_args.verbose)
all_filepaths = _get_all_filepaths(parsed_args.path, parsed_args.files)
if len(all_filepaths) == 0:
python_utils.PRINT('---------------------------')
python_utils.PRINT('No files to check.')
python_utils.PRINT('---------------------------')
return
read_files(all_filepaths)
categorize_files(all_filepaths)
linting_processes, result_queues, result_stdout = _lint_all_files(
_FILES['.js'], _FILES['.ts'], _FILES['.py'], _FILES['.html'],
_FILES['.css'], verbose_mode_enabled)
code_owner_message = _check_codeowner_file(verbose_mode_enabled)
    # Pylint expects the parameters "this_bases" and "d" to be provided,
    # presumably because of the metaclass.
js_ts_lint_checks_manager = JsTsLintChecksManager( # pylint: disable=no-value-for-parameter
verbose_mode_enabled)
other_lint_checks_manager = OtherLintChecksManager( # pylint: disable=no-value-for-parameter
verbose_mode_enabled)
all_messages = code_owner_message
js_message = js_ts_lint_checks_manager.perform_all_lint_checks()
other_messages = other_lint_checks_manager.perform_all_lint_checks()
all_messages += js_message + other_messages
all_messages += _join_linting_process(
linting_processes, result_queues, result_stdout)
_print_complete_summary_of_errors()
if any([message.startswith(_MESSAGE_TYPE_FAILED) for message in
all_messages]):
python_utils.PRINT('---------------------------')
python_utils.PRINT('Checks Not Passed.')
python_utils.PRINT('---------------------------')
sys.exit(1)
else:
python_utils.PRINT('---------------------------')
python_utils.PRINT('All Checks Passed.')
python_utils.PRINT('---------------------------')
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = FileCache()
FILE_CACHE = NAME_SPACE.files
if __name__ == '__main__':
main()
|
parallel_levenshtein.py
|
import sys
import getopt
import threading
INS_COST = 1
DEL_COST = 1
EXC_COST = 1
vLock = threading.Semaphore(value=0)
hLock = threading.Semaphore(value=0)
def main():
s = ''
t = ''
#parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:')
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
for o, a in opts:
if o == '-s':
s = a
elif o == '-t':
t = a
    #call levenshtein
print("The levenshtein distance between words: '" + s + "' and '" + t + "' is " + str(levenshtein(s, t)))
#levenshtein distance dynamic programming
def levenshtein(s, t):
len_s = len(s)
len_t = len(t)
#allocate distance matrix
d = [[0 for j in range(len_t+1)] for i in range(len_s+1)]
#initialization of distance matrix
for j in range(0, len_t+1):
d[0][j] = j
for i in range(0, len_s+1):
d[i][0] = i
#call levenshtein
    vthread = threading.Thread(target=vlevenshtein, args=(d, s, len_s, t, len_t)) #vertical in new thread
vthread.start() #start new thread
hlevenshtein(d, s, len_s, t, len_t) #horizontal in main thread
#join threads
vthread.join()
return d[len_s][len_t]
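# Worked example for levenshtein() above (sketch): levenshtein("kitten", "sitting")
# fills a 7x8 distance matrix and returns 3 (substitute k->s, substitute e->i, insert g).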
def hlevenshtein(d, s, len_s, t, len_t):
rangeValue = min(len_s, len_t)
flagMin = len_t <= len_s
for i in range(1, rangeValue + 1):
        #decrease vertical semaphore
        if flagMin == 0 or i > 1:
            vLock.acquire()
        #compute the levenshtein distance for the first cell of this row
        j = i + (flagMin == 0)
if j < (len_t + 1):
if s[i-1] == t[j-1]:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1])
else:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1] + EXC_COST)
        #increase horizontal semaphore
        hLock.release()
        #compute the levenshtein distance for the remaining cells of this row
        for j in range(j + 1, len_t + 1):
if s[i-1] == t[j-1]:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1])
else:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1] + EXC_COST)
def vlevenshtein(d, s, len_s, t, len_t):
rangeValue = min(len_s, len_t)
flagMin = len_s < len_t
for j in range(1, rangeValue + 1):
        #decrease horizontal semaphore
        if flagMin == 0 or j > 1:
            hLock.acquire()
        #compute the levenshtein distance for the first cell of this column
        i = j + (flagMin == 0)
if i < (len_s + 1):
if s[i-1] == t[j-1]:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1])
else:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1] + EXC_COST)
        #increase vertical semaphore
        vLock.release()
        #compute the levenshtein distance for the remaining cells of this column
        for i in range(i + 1, len_s + 1):
if s[i-1] == t[j-1]:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1])
else:
d[i][j] = min(d[i-1][j] + DEL_COST, d[i][j-1] + INS_COST, d[i-1][j-1] + EXC_COST)
#call main method
if __name__ == "__main__":
main()
|
SendQueue.py
|
# encoding=utf-8
from StringIO import StringIO
from datetime import datetime
from threading import Thread
import requests
import time
import speech_recognition as sr
import wave
from globals import CHANNELS, FORMAT, RATE
class SendQueue(object):
# _url = "https://api.telegram.org/bot{}/sendVoice"
# _url = "https://149.154.167.220:443/bot{}/sendVoice"
_url = "https://149.154.167.220:443/bot{}/sendVoice"
def __init__(self, token, chat_id, with_text=False, PROXY = {}):
super(SendQueue, self).__init__()
self.PROXY = PROXY
self.with_text = with_text
self.chat_id = chat_id
self.url = self._url.format(token)
self.queue = []
self.thread = Thread(target=self.listen)
self.thread.setDaemon(1)
def _to_wav(self, p, frames):
st = StringIO(b'')
wf = wave.open(st, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
return st
def listen(self):
while 1:
self._listen()
time.sleep(1)
def _listen(self):
if self.queue:
p, frames = self.queue.pop()
try:
self._send_message(self._to_wav(p, frames))
except:
self.add(p, frames)
print "SEND --", time.time()
def start(self):
self.thread.start()
def add(self, p, item):
self.queue.append((p, item))
def audio_caption(self, source):
if not self.with_text: return None
source.seek(0)
r = sr.Recognizer()
        with sr.AudioFile(source) as audio_source:
            audio = r.record(audio_source)
try:
command = r.recognize_google(audio, language='ru-RU')
return command
except:
pass
return None
def _send_message(self, message):
now = datetime.now()
caption = self.audio_caption(message) or "Записано:\n{}".format(now.strftime("%Y-%m-%d\n%H:%M:%S"))
message.seek(0)
msg_file = message.read()
res = requests.post(self.url, verify=False, data={
"chat_id": self.chat_id,
"duration": len(msg_file) / 120000,
"caption": caption
}, files={
"voice": msg_file
}, proxies=self.PROXY, headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9,ru-RU;q=0.8,ru;q=0.7,la;q=0.6,da;q=0.5,uk;q=0.4",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Host": "api.telegram.org",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1",
})
print res.content
|
test_ipc.py
|
"""
:codeauthor: Mike Place <mp@saltstack.com>
"""
import errno
import logging
import os
import threading
import pytest
import salt.config
import salt.exceptions
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.ext.tornado.testing
import salt.transport.client
import salt.transport.ipc
import salt.transport.server
import salt.utils.platform
from tests.support.mock import MagicMock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
pytestmark = [
pytest.mark.skip_on_darwin,
pytest.mark.skip_on_freebsd,
pytest.mark.skip_on_windows,
]
log = logging.getLogger(__name__)
@skipIf(salt.utils.platform.is_windows(), "Windows does not support Posix IPC")
class BaseIPCReqCase(salt.ext.tornado.testing.AsyncTestCase):
"""
Test the req server/client pair
"""
def setUp(self):
super().setUp()
# self._start_handlers = dict(self.io_loop._handlers)
self.socket_path = os.path.join(RUNTIME_VARS.TMP, "ipc_test.ipc")
self.server_channel = salt.transport.ipc.IPCMessageServer(
self.socket_path,
io_loop=self.io_loop,
payload_handler=self._handle_payload,
)
self.server_channel.start()
self.payloads = []
def tearDown(self):
super().tearDown()
# failures = []
try:
self.server_channel.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
os.unlink(self.socket_path)
# for k, v in self.io_loop._handlers.items():
# if self._start_handlers.get(k) != v:
# failures.append((k, v))
# if len(failures) > 0:
# raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
del self.payloads
del self.socket_path
del self.server_channel
# del self._start_handlers
@salt.ext.tornado.gen.coroutine
def _handle_payload(self, payload, reply_func):
self.payloads.append(payload)
yield reply_func(payload)
if isinstance(payload, dict) and payload.get("stop"):
self.stop()
class IPCMessageClient(BaseIPCReqCase):
"""
Test all of the clear msg stuff
"""
def _get_channel(self):
if not hasattr(self, "channel") or self.channel is None:
self.channel = salt.transport.ipc.IPCMessageClient(
socket_path=self.socket_path, io_loop=self.io_loop,
)
self.channel.connect(callback=self.stop)
self.wait()
return self.channel
def setUp(self):
super().setUp()
self.channel = self._get_channel()
def tearDown(self):
super().tearDown()
try:
# Make sure we close no matter what we've done in the tests
del self.channel
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
finally:
self.channel = None
def test_singleton(self):
channel = self._get_channel()
assert self.channel is channel
        # Delete the local channel. Since there's still one more reference,
        # __del__ wasn't called.
del channel
assert self.channel
msg = {"foo": "bar", "stop": True}
self.channel.send(msg)
self.wait()
self.assertEqual(self.payloads[0], msg)
def test_basic_send(self):
msg = {"foo": "bar", "stop": True}
self.channel.send(msg)
self.wait()
self.assertEqual(self.payloads[0], msg)
def test_many_send(self):
msgs = []
self.server_channel.stream_handler = MagicMock()
for i in range(0, 1000):
msgs.append("test_many_send_{}".format(i))
for i in msgs:
self.channel.send(i)
self.channel.send({"stop": True})
self.wait()
self.assertEqual(self.payloads[:-1], msgs)
def test_very_big_message(self):
long_str = "".join([str(num) for num in range(10 ** 5)])
msg = {"long_str": long_str, "stop": True}
self.channel.send(msg)
self.wait()
self.assertEqual(msg, self.payloads[0])
def test_multistream_sends(self):
local_channel = self._get_channel()
for c in (self.channel, local_channel):
c.send("foo")
self.channel.send({"stop": True})
self.wait()
self.assertEqual(self.payloads[:-1], ["foo", "foo"])
def test_multistream_errors(self):
local_channel = self._get_channel()
for c in (self.channel, local_channel):
c.send(None)
for c in (self.channel, local_channel):
c.send("foo")
self.channel.send({"stop": True})
self.wait()
self.assertEqual(self.payloads[:-1], [None, None, "foo", "foo"])
@skipIf(salt.utils.platform.is_windows(), "Windows does not support Posix IPC")
class IPCMessagePubSubCase(salt.ext.tornado.testing.AsyncTestCase):
"""
Test all of the clear msg stuff
"""
def setUp(self):
super().setUp()
self.opts = {"ipc_write_buffer": 0}
self.socket_path = os.path.join(RUNTIME_VARS.TMP, "ipc_test.ipc")
self.pub_channel = self._get_pub_channel()
self.sub_channel = self._get_sub_channel()
def _get_pub_channel(self):
pub_channel = salt.transport.ipc.IPCMessagePublisher(
self.opts, self.socket_path,
)
pub_channel.start()
return pub_channel
def _get_sub_channel(self):
sub_channel = salt.transport.ipc.IPCMessageSubscriber(
socket_path=self.socket_path, io_loop=self.io_loop,
)
sub_channel.connect(callback=self.stop)
self.wait()
return sub_channel
def tearDown(self):
super().tearDown()
try:
self.pub_channel.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
try:
self.sub_channel.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
os.unlink(self.socket_path)
del self.pub_channel
del self.sub_channel
def test_multi_client_reading(self):
# To be completely fair let's create 2 clients.
client1 = self.sub_channel
client2 = self._get_sub_channel()
call_cnt = []
# Create a watchdog to be safe from hanging in sync loops (what old code did)
evt = threading.Event()
def close_server():
if evt.wait(1):
return
client2.close()
self.stop()
watchdog = threading.Thread(target=close_server)
watchdog.start()
# Runs in ioloop thread so we're safe from race conditions here
def handler(raw):
call_cnt.append(raw)
if len(call_cnt) >= 2:
evt.set()
self.stop()
# Now let both waiting data at once
client1.read_async(handler)
client2.read_async(handler)
self.pub_channel.publish("TEST")
self.wait()
self.assertEqual(len(call_cnt), 2)
self.assertEqual(call_cnt[0], "TEST")
self.assertEqual(call_cnt[1], "TEST")
def test_sync_reading(self):
# To be completely fair let's create 2 clients.
client1 = self.sub_channel
client2 = self._get_sub_channel()
call_cnt = []
# Now let both waiting data at once
self.pub_channel.publish("TEST")
ret1 = client1.read_sync()
ret2 = client2.read_sync()
self.assertEqual(ret1, "TEST")
self.assertEqual(ret2, "TEST")
|
foo-upgraded.py
|
import sys
import os
import argparse
from setup.settings import hparams, preprocessing
import math
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/nmt")
from nmt import nmt
import tensorflow as tf
import colorama
from threading import Thread
from setup.custom_summary import custom_summary
colorama.init()
def train():
print('\n\n{}Training model...{}\n'.format(colorama.Fore.GREEN, colorama.Fore.RESET))
# Custom epoch training and decaying
if preprocessing['epochs'] is not None:
# Load corpus size, calculate number of steps
with open('{}/corpus_size'.format(preprocessing['train_folder']), 'r') as f:
corpus_size = int(f.read())
# Load current train progress
try:
with open('{}epochs_passed'.format(hparams['out_dir']), 'r') as f:
initial_epoch = int(f.read())
except:
initial_epoch = 0
# Iterate thru epochs
for epoch, learning_rate in enumerate(preprocessing['epochs']):
# Check if model already passed that epoch
if epoch < initial_epoch:
print('{}Epoch: {}, learning rate: {} - already passed{}'.format(colorama.Fore.GREEN, epoch + 1, learning_rate, colorama.Fore.RESET))
continue
# Calculate new number of training steps - up to the end of current epoch
num_train_steps = math.ceil((epoch + 1) * corpus_size / (hparams['batch_size'] if 'batch_size' in hparams else 128))
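            # Hedged arithmetic sketch: with a corpus_size of 100000 and batch_size 128,
            # epoch 0 ends at ceil(1 * 100000 / 128) = 782 steps and epoch 1 at 1563 steps.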
print("\n{}Epoch: {}, steps per epoch: {}, epoch ends at {} steps, learning rate: {} - training{}\n".format(
colorama.Fore.GREEN,
epoch + 1,
math.ceil(corpus_size / (hparams['batch_size'] if 'batch_size' in hparams else 128)),
num_train_steps,
learning_rate,
colorama.Fore.RESET
))
# Override hparams
hparams['num_train_steps'] = num_train_steps
hparams['learning_rate'] = learning_rate
hparams['override_loaded_hparams'] = True
# Run TensorFlow threaded (exits on finished training, but we want to train more)
thread = Thread(target=nmt_train)
thread.start()
thread.join()
# Save epoch progress
with open('{}epochs_passed'.format(hparams['out_dir']), 'w') as f:
f.write(str(epoch + 1))
# Standard training
else:
nmt_train()
print('\n\n{}Training finished{}\n'.format(colorama.Fore.GREEN, colorama.Fore.RESET))
def nmt_train():
# Modified autorun from nmt.py (bottom of the file)
# We want to use original argument parser (for validation, etc)
nmt_parser = argparse.ArgumentParser()
nmt.add_arguments(nmt_parser)
# But we have to hack settings from our config in there instead of commandline options
nmt.FLAGS, unparsed = nmt_parser.parse_known_args(['--'+k+'='+str(v) for k,v in hparams.items()])
# Add custom summary function (hook)
nmt.summary_callback = custom_summary
# And now we can run TF with modified arguments
    tf.compat.v1.app.run(main=nmt.main, argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)
train()
|
streaming.py
|
# Tweepy
# Copyright 2009-2019 Joshua Roesslein
# See LICENSE for details.
# Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets
from __future__ import absolute_import
import json
import logging
import re
import requests
import ssl
import sys
from threading import Thread
from time import sleep
import six
from requests.exceptions import Timeout
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.models import Status
STREAM_VERSION = '1.1'
log = logging.getLogger(__name__)
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, data)
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = data['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'event' in data:
status = Status.parse(self.api, data)
if self.on_event(status) is False:
return False
elif 'direct_message' in data:
status = Status.parse(self.api, data)
if self.on_direct_message(status) is False:
return False
elif 'friends' in data:
if self.on_friends(data['friends']) is False:
return False
elif 'limit' in data:
if self.on_limit(data['limit']['track']) is False:
return False
elif 'disconnect' in data:
if self.on_disconnect(data['disconnect']) is False:
return False
elif 'warning' in data:
if self.on_warning(data['warning']) is False:
return False
elif 'scrub_geo' in data:
if self.on_scrub_geo(data['scrub_geo']) is False:
return False
elif 'status_withheld' in data:
if self.on_status_withheld(data['status_withheld']) is False:
return False
elif 'user_withheld' in data:
if self.on_user_withheld(data['user_withheld']) is False:
return False
else:
log.error("Unknown message type: %s", raw_data)
def keep_alive(self):
"""Called when a keep-alive arrived"""
return
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_event(self, status):
"""Called when a new event arrives"""
return
def on_direct_message(self, status):
"""Called when a new direct message arrives"""
return
def on_friends(self, friends):
"""Called when a friends list arrives.
friends is a list that contains user_id
"""
return
def on_limit(self, track):
"""Called when a limitation notice arrives"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/streaming-message-types
"""
return
def on_warning(self, notice):
"""Called when a disconnection warning message arrives"""
return
def on_scrub_geo(self, notice):
"""Called when a location deletion notice arrives"""
return
def on_status_withheld(self, notice):
"""Called when a status withheld content notice arrives"""
return
def on_user_withheld(self, notice):
"""Called when a user withheld content notice arrives"""
return
class ReadBuffer(object):
"""Buffer data from the response in a smarter way than httplib/requests can.
Tweets are roughly in the 2-12kb range, averaging around 3kb.
Requests/urllib3/httplib/socket all use socket.read, which blocks
until enough data is returned. On some systems (eg google appengine), socket
reads are quite slow. To combat this latency we can read big chunks,
but the blocking part means we won't get results until enough tweets
have arrived. That may not be a big deal for high throughput systems.
For low throughput systems we don't want to sacrifice latency, so we
use small chunks so it can read the length and the tweet in 2 read calls.
"""
def __init__(self, stream, chunk_size, encoding='utf-8'):
self._stream = stream
self._buffer = six.b('')
self._chunk_size = chunk_size
self._encoding = encoding
def read_len(self, length):
while not self._stream.closed:
if len(self._buffer) >= length:
return self._pop(length)
read_len = max(self._chunk_size, length - len(self._buffer))
self._buffer += self._stream.read(read_len)
return six.b('')
def read_line(self, sep=six.b('\n')):
"""Read the data stream until a given separator is found (default \n)
        :param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep
"""
start = 0
while not self._stream.closed:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size)
return six.b('')
def _pop(self, length):
r = self._buffer[:length]
self._buffer = self._buffer[length:]
return r.decode(self._encoding)
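# Usage sketch for ReadBuffer (illustrative only, names assumed): given a streaming
# response `resp` from requests, `buf = ReadBuffer(resp.raw, 512)` lets the read loop
# call `buf.read_line()` to get the length prefix and then `buf.read_len(length)` to
# pull exactly one delimited status payload.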
class Stream(object):
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.daemon = options.get("daemon", False)
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
# values according to
# https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/connecting#reconnecting
self.retry_time_start = options.get("retry_time", 5.0)
self.retry_420_start = options.get("retry_420", 60.0)
self.retry_time_cap = options.get("retry_time_cap", 320.0)
self.snooze_time_step = options.get("snooze_time", 0.25)
self.snooze_time_cap = options.get("snooze_time_cap", 16)
# The default socket.read size. Default to less than half the size of
# a tweet so that it reads tweets with the minimal latency of 2 reads
# per tweet. Values higher than ~1kb will increase latency by waiting
# for more data to arrive but may also increase throughput by doing
# fewer socket read calls.
self.chunk_size = options.get("chunk_size", 512)
self.verify = options.get("verify", True)
self.api = API()
self.headers = options.get("headers") or {}
self.new_session()
self.body = None
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
# Example: proxies = {'http': 'http://localhost:1080', 'https': 'http://localhost:1080'}
self.proxies = options.get("proxies")
self.host = options.get('host', 'stream.twitter.com')
def new_session(self):
self.session = requests.Session()
self.session.headers = self.headers
self.session.params = None
def _run(self):
# Authenticate
url = "https://%s%s" % (self.host, self.url)
# Connect and process the stream
error_counter = 0
resp = None
exc_info = None
while self.running:
if self.retry_count is not None:
if error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
auth = self.auth.apply_auth()
resp = self.session.request('POST',
url,
data=self.body,
timeout=self.timeout,
stream=True,
auth=auth,
verify=self.verify,
                                            proxies=self.proxies)
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
if resp.status_code == 420:
self.retry_time = max(self.retry_420_start,
self.retry_time)
sleep(self.retry_time)
self.retry_time = min(self.retry_time * 2,
self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (Timeout, ssl.SSLError) as exc:
# This is still necessary, as a SSLError can actually be
# thrown when using Requests
# If it's not time out treat it like any other exception
if isinstance(exc, ssl.SSLError):
if not (exc.args and 'timed out' in str(exc.args[0])):
exc_info = sys.exc_info()
break
if self.listener.on_timeout() is False:
break
if self.running is False:
break
sleep(self.snooze_time)
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exc:
exc_info = sys.exc_info()
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if resp:
resp.close()
self.new_session()
if exc_info:
# call a handler first so that the exception can be logged.
self.listener.on_exception(exc_info[1])
six.reraise(*exc_info)
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
charset = resp.headers.get('content-type', default='')
enc_search = re.search(r'charset=(?P<enc>\S*)', charset)
if enc_search is not None:
encoding = enc_search.group('enc')
else:
encoding = 'utf-8'
buf = ReadBuffer(resp.raw, self.chunk_size, encoding=encoding)
while self.running and not resp.raw.closed:
length = 0
while not resp.raw.closed:
line = buf.read_line()
stripped_line = line.strip() if line else line # line is sometimes None so we need to check here
if not stripped_line:
self.listener.keep_alive() # keep-alive new lines are expected
elif stripped_line.isdigit():
length = int(stripped_line)
break
else:
raise TweepError('Expecting length, unexpected value found')
next_status_obj = buf.read_len(length)
if self.running and next_status_obj:
self._data(next_status_obj)
# # Note: keep-alive newlines might be inserted before each length value.
# # read until we get a digit...
# c = b'\n'
# for c in resp.iter_content(decode_unicode=True):
# if c == b'\n':
# continue
# break
#
# delimited_string = c
#
# # read rest of delimiter length..
# d = b''
# for d in resp.iter_content(decode_unicode=True):
# if d != b'\n':
# delimited_string += d
# continue
# break
#
# # read the next twitter status object
# if delimited_string.decode('utf-8').strip().isdigit():
# status_id = int(delimited_string)
# next_status_obj = resp.raw.read(status_id)
# if self.running:
# self._data(next_status_obj.decode('utf-8'))
if resp.raw.closed:
self.on_closed(resp)
def _start(self, is_async):
self.running = True
if is_async:
self._thread = Thread(target=self._run)
self._thread.daemon = self.daemon
self._thread.start()
else:
self._run()
def on_closed(self, resp):
""" Called when the response has been closed by Twitter """
pass
def userstream(self,
stall_warnings=False,
_with=None,
replies=None,
track=None,
locations=None,
is_async=False,
encoding='utf8'):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/user.json' % STREAM_VERSION
self.host = 'userstream.twitter.com'
if stall_warnings:
self.session.params['stall_warnings'] = stall_warnings
if _with:
self.session.params['with'] = _with
if replies:
self.session.params['replies'] = replies
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.session.params['locations'] = ','.join(['%.2f' % l for l in locations])
if track:
self.session.params['track'] = u','.join(track).encode(encoding)
self._start(is_async)
def firehose(self, count=None, is_async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/firehose.json' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(is_async)
def retweet(self, is_async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/retweet.json' % STREAM_VERSION
self._start(is_async)
def sample(self, is_async=False, languages=None, stall_warnings=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/sample.json' % STREAM_VERSION
if languages:
self.session.params['language'] = ','.join(map(str, languages))
if stall_warnings:
self.session.params['stall_warnings'] = 'true'
self._start(is_async)
def filter(self, follow=None, track=None, is_async=False, locations=None,
stall_warnings=False, languages=None, encoding='utf8', filter_level=None):
self.body = {}
self.session.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/filter.json' % STREAM_VERSION
if follow:
self.body['follow'] = u','.join(follow).encode(encoding)
if track:
self.body['track'] = u','.join(track).encode(encoding)
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.body['locations'] = u','.join(['%.4f' % l for l in locations])
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if languages:
self.body['language'] = u','.join(map(str, languages))
if filter_level:
self.body['filter_level'] = filter_level.encode(encoding)
self.session.params = {'delimited': 'length'}
self._start(is_async)
def sitestream(self, follow, stall_warnings=False,
with_='user', replies=False, is_async=False):
self.body = {}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/site.json' % STREAM_VERSION
self.body['follow'] = u','.join(map(six.text_type, follow))
self.body['delimited'] = 'length'
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if with_:
self.body['with'] = with_
if replies:
self.body['replies'] = replies
self._start(is_async)
def disconnect(self):
if self.running is False:
return
self.running = False
|
process.py
|
"""Provide process tools."""
import asyncio
import signal
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from time import sleep
from typing import TYPE_CHECKING, Any, Callable, Dict, Tuple
from cpias.const import LOGGER
from cpias.exceptions import CPIASError
if TYPE_CHECKING:
from cpias.server import CPIAServer
class ReceiveError(CPIASError):
"""Error raised when receving from a process failed."""
def create_process(
server: "CPIAServer", create_callback: Callable, *args: Any
) -> Tuple[Callable, Callable]:
"""Create a persistent process."""
parent_conn, child_conn = Pipe()
prc = Process(target=func_wrapper, args=(create_callback, child_conn, *args))
prc.start()
def stop_process() -> None:
"""Stop process."""
prc.terminate()
server.on_stop(stop_process)
async def async_recv() -> Any:
"""Receive data from the process connection asynchronously."""
while True:
if not prc.is_alive() or parent_conn.poll():
break
await asyncio.sleep(0.5)
if not prc.is_alive():
raise ReceiveError
try:
return await server.add_executor_job(parent_conn.recv)
except EOFError as exc:
LOGGER.debug("Nothing more to receive")
raise ReceiveError from exc
async def async_send(data: Dict[Any, Any]) -> None:
"""Send data to the process."""
parent_conn.send(data)
return async_recv, async_send
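# Usage sketch (assumes a running CPIAServer instance `server` and a factory
# `make_worker` that returns a callable taking one message and returning a result):
#     recv, send = create_process(server, make_worker)
#     await send({"task": "ping"})
#     reply = await recv()
# The worker process keeps polling its end of the pipe until the server stops it.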
def func_wrapper(create_callback: Callable, conn: Connection, *args: Any) -> None:
"""Wrap a function with connection to receive and send data."""
running = True
# pylint: disable=unused-argument
def handle_signal(signum: int, frame: Any) -> None:
"""Handle signal."""
nonlocal running
running = False
conn.close()
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
try:
callback = create_callback(*args)
except Exception as exc: # pylint: disable=broad-except
LOGGER.error("Failed to create callback: %s", exc)
return
while running:
while running:
if conn.poll():
break
sleep(0.5)
try:
data = conn.recv()
except EOFError:
LOGGER.debug("Nothing more to receive")
break
except OSError:
LOGGER.debug("Connection is closed")
break
try:
result = callback(data)
except Exception as exc: # pylint: disable=broad-except
LOGGER.error("Failed to run callback: %s", exc)
break
if not running:
break
try:
conn.send(result)
except ValueError:
LOGGER.error("Failed to send result %s", result)
except OSError:
LOGGER.debug("Connection is closed")
break
LOGGER.debug("Exiting process")
|
resample_dataset.py
|
import os, sys, yaml
import numpy as np
from pathlib import Path
import random
import threading
import argparse
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),"../../..")))
from meshcnn.models.layers.mesh_prepare import fill_from_file,remove_non_manifolds, build_gemm
surfaceTypes = ['Plane','Revolution', 'Cylinder','Extrusion','Cone','Other','Sphere','Torus','BSpline']
favoredSurfaceTypeIndices = [1,3,4,5,6,7,8]
def parseYamlFile(featPath):
data = None
with open(featPath, 'r') as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return data
def getSurfaceTypeFaceCount(featPath):
featData = parseYamlFile(featPath)
surfaceTypeFaceCount = np.zeros(len(surfaceTypes), dtype=int)
for surface in featData['surfaces']:
surfaceId = surfaceTypes.index(surface['type'])
surfaceTypeFaceCount[surfaceId] += len(surface['face_indices'])
return surfaceTypeFaceCount
def getTargetDatasetRoot(links):
assert(len(links) > 0)
targetObj = os.readlink(links[0])
pathToObjDir = os.path.split(targetObj)[0]
objPrefixPath, _ = os.path.split(pathToObjDir)
return os.path.split(objPrefixPath)[0]
def objPathToFeatPath(objPath):
pathToObjDir = os.path.split(objPath)[0]
globalObjPath, sampleId = os.path.split(pathToObjDir)
datasetPath = os.path.split(globalObjPath)[0]
featDirPath = os.path.join(datasetPath,"feat",sampleId)
return os.path.join(featDirPath, next(os.walk(featDirPath))[2][0])
def getObjLinks(datasetRoot):
objLinkPaths = []
for root, _, fnames in os.walk(datasetRoot):
for fname in fnames:
if (os.path.splitext(fname)[1] == ".obj"):
objLinkPaths.append(os.path.join(root, fname))
return objLinkPaths
def selectSample(surfaceTypeFaceCount):
return surfaceTypeFaceCount[favoredSurfaceTypeIndices].sum() > 0
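# Illustration (sketch): a sample whose faces are only Plane (index 0) and
# Cylinder (index 2) yields zero counts at the favored indices and is dropped,
# while a single Torus or BSpline face is enough for selectSample() to keep it.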
def addMesh(srcPath, dstPath):
os.symlink(os.path.abspath(srcPath),os.path.join(dstPath,os.path.basename(srcPath)))
def thread_function(objFileTargets, results):
results["totals"] = np.zeros(len(surfaceTypes), dtype=int)
results["selected"] = []
objFileTargets = objFileTargets[:5]
threadId = threading.current_thread()
for objPath in objFileTargets:
surfaceTypeFaceCount = getSurfaceTypeFaceCount(objPathToFeatPath(objPath))
if (selectSample(surfaceTypeFaceCount)):
results["selected"].append(objPath)
print("{}: {} -> {}".format(threadId, os.path.relpath(objPath, targetDatasetRoot), surfaceTypeFaceCount))
results["totals"] += surfaceTypeFaceCount
# if (len(selectedObjPaths) % 20 == 0):
# print("Totals after {} samples: {}".format(len(selectedObjPaths),
# totalSurfaceTypeFaceCount * 100 / totalSurfaceTypeFaceCount.sum()))
if __name__ == '__main__':
parser = argparse.ArgumentParser("Resample dataset removing samples with only planes or cylinder surfaces")
parser.add_argument('--src', required=True, type=str, help="Path where source dataset is located")
parser.add_argument('--dst', required=True, type=str, help="Path where dataset will be saved")
parser.add_argument('--testRatio', default=0.2, type=float, help="Test/train ratio")
parser.add_argument('--numThreads', type=int, default=7, help="Number of threads")
args = parser.parse_args()
oldDatasetRoot = args.src
newDatasetRoot = args.dst
testTrainRatio = args.testRatio
numThreads = args.numThreads
objLinks = getObjLinks(oldDatasetRoot)
if (len(objLinks) == 0):
print("No obj files found in", oldDatasetRoot)
exit(1)
dstTrainPath = os.path.join(newDatasetRoot,"train")
dstTestPath = os.path.join(newDatasetRoot,"test")
try:
Path(dstTrainPath).mkdir(parents=True, exist_ok=False)
Path(dstTestPath).mkdir(parents=True, exist_ok=False)
except FileExistsError as f_error:
print(f_error)
exit(1)
targetDatasetRoot = getTargetDatasetRoot(objLinks)
print("Resampling dataset {} (targets in {}) with {} obj files into {}".format(oldDatasetRoot, targetDatasetRoot, len(objLinks), newDatasetRoot))
#objLinks.sort()
objFileTargets = list(map(lambda link: os.readlink(link),objLinks))
#featFilePaths = list(map(objPathToFeatPath,objFileTargets))
totalSurfaceTypeFaceCount = np.zeros(len(surfaceTypes), dtype=int)
selectedObjPaths = []
thLinkCount = int(len(objFileTargets) / numThreads)
threads = []
results = []
for index in range(numThreads):
thListBegin = index * thLinkCount
thListEnd = len(objFileTargets) if index == (numThreads - 1) else thListBegin + thLinkCount
print("Process {} takes models [{},{}]".format(index, thListBegin, thListEnd))
thList = objFileTargets[thListBegin:thListEnd]
results.append({})
th = threading.Thread(target=thread_function, args=(thList, results[index]))
threads.append(th)
th.start()
for i in range(numThreads):
threads[i].join()
totalSurfaceTypeFaceCount += results[i]["totals"]
selectedObjPaths += results[i]["selected"]
random.shuffle(selectedObjPaths)
numTotalSamples = len(selectedObjPaths)
numTestSamples = int(numTotalSamples * testTrainRatio)
numTrainSamples = numTotalSamples - numTestSamples
for samplePath in selectedObjPaths[:numTrainSamples]:
addMesh(samplePath, dstTrainPath)
for samplePath in selectedObjPaths[numTrainSamples:]:
addMesh(samplePath, dstTestPath)
print("Resampled dataset {} samples, ({} train, {} test), surface frequencies {}".format(numTotalSamples, numTrainSamples, numTestSamples, totalSurfaceTypeFaceCount * 100 / totalSurfaceTypeFaceCount.sum()))
|
zergling_actor.py
|
import copy
import queue
import time
import uuid
from collections import namedtuple
from threading import Thread
from typing import List, Dict, Callable, Any, Tuple
from easydict import EasyDict
from collections import deque
import torch
from ctools.data import default_collate, default_decollate
from ctools.torch_utils import to_device, tensor_to_list
from ctools.utils import get_data_compressor, lists_to_dicts
from ctools.worker.agent import BaseAgent
from ctools.worker.actor import BaseActor
from ctools.worker.actor.env_manager import SubprocessEnvManager, BaseEnvManager
class ZerglingActor(BaseActor):
"""
Feature:
- one agent/sync many agents, many envs
- async envs(step + reset)
- batch network eval
- different episode length env
- periodic agent update
- metadata + stepdata
"""
# override
def _init(self) -> None:
super()._init()
self._traj_queue = queue.Queue()
self._result_queue = queue.Queue()
self._update_agent_thread = Thread(target=self._update_agent, args=())
self._update_agent_thread.daemon = True
self._update_agent_thread.start() # keep alive in the whole job
self._model_deque = [deque(maxlen=1) for _ in range(2)]
self._pack_trajectory_thread = Thread(target=self._pack_trajectory, args=())
self._pack_trajectory_thread.daemon = True
self._pack_trajectory_thread.start()
        self._send_result_worker = Thread(target=self._send_result_thread, args=())
        self._send_result_worker.daemon = True
        self._send_result_worker.start()  # keep alive in the whole job
# override
def _init_with_job(self, job: dict) -> None:
super()._init_with_job(job)
self._job = job
self._logger.info('ACTOR({}): init with job {} in {}'.format(self._actor_uid, self._job['job_id'], time.time()))
self._start_time = time.time()
self._step_count = 0
assert len(self._job['agent']) >= 1
self._env_kwargs = self._cfg.actor.env_kwargs
self._env_kwargs.env_cfg.player1.name = self._job['player_id'][0].split('_')[0]
self._env_kwargs.env_cfg.player2.name = self._job['player_id'][1].split('_')[0]
self._env_num = self._env_kwargs['env_num']
self._compressor = get_data_compressor(self._cfg.actor.compressor)
self._agent_update_freq = self._cfg.actor.agent_update_freq
self._job_result = {k: [] for k in range(self._env_num)}
self._collate_fn = default_collate
self._decollate_fn = default_decollate
self._env_manager = self._setup_env_manager()
self._agent = self._setup_agent()
self._obs_pool = {k: None for k in range(self._env_num)}
self._act_pool = {k: None for k in range(self._env_num)}
self._data_buffer = {k: [] for k in range(self._env_num)}
self._last_data_buffer = {k: [] for k in range(self._env_num)}
self._episode_result = {k: None for k in range(self._env_num)}
self._job_finish_flag = False
def _setup_env_manager(self) -> BaseEnvManager:
env_cfg = EasyDict(self._env_kwargs['env_cfg'])
map_name = env_cfg.map_name
env_num = self._env_kwargs['env_num']
if isinstance(env_cfg, dict):
env_fn = self._setup_env_fn(env_cfg)
env_cfg = [env_cfg for _ in range(env_num)]
else:
raise TypeError("not support env_cfg type: {}".format(env_cfg))
env_manager = SubprocessEnvManager(
env_fn=env_fn, env_cfg=env_cfg, env_num=env_num, episode_num=self._env_kwargs['episode_num'], map_name=map_name
)
env_manager.launch()
return env_manager
# override
def _agent_inference(self, obs: Dict[int, Any]) -> Dict[int, Any]:
# save in obs_pool
for k, v in obs.items():
self._obs_pool[k] = copy.deepcopy(v)
env_id = obs.keys()
obs = self._collate_fn(list(obs.values()))
if self._cfg.actor.use_cuda:
obs = to_device(obs, 'cuda')
forward_kwargs = self._job['forward_kwargs']
forward_kwargs['state_id'] = list(env_id)
if len(self._job['agent']) == 1:
data = self._agent.forward(obs, **forward_kwargs)
else:
data = [agent.forward(obs[i], **forward_kwargs) for i, agent in enumerate(self._agent)]
if self._cfg.actor.use_cuda:
data = to_device(data, 'cpu')
data = self._decollate_fn(data)
data = [lists_to_dicts(d) for d in data]
data = {i: d for i, d in zip(env_id, data)}
return data
# override
def _env_step(self, agent_output: Dict[int, Dict]) -> Dict[int, Any]:
# save in act_pool
for k, v in agent_output.items():
self._act_pool[k] = copy.deepcopy(v)
action = {k: v['action'] for k, v in agent_output.items()}
return self._env_manager.step(action)
# override
def _process_timestep(self, timestep: Dict[int, namedtuple]) -> None:
for env_id, t in timestep.items():
data = self._get_transition(self._obs_pool[env_id], self._act_pool[env_id], timestep[env_id])
self._data_buffer[env_id].append(data)
self._step_count += 1
if len(self._data_buffer[env_id]) == (self._adder_kwargs['data_push_length'] + 1):
# last data copy must be in front of obs_next
last = self._data_buffer[env_id][-1]
data = self._data_buffer[env_id][:-1]
self._last_data_buffer[env_id].clear()
self._last_data_buffer[env_id] = copy.deepcopy(data)
if self._adder_kwargs['use_gae']:
gamma = self._adder_kwargs['gamma']
gae_lambda = self._adder_kwargs['gae_lambda']
data = self._adder.get_gae(data, last['value'], gamma, gae_lambda)
self._traj_queue.put({'data': data, 'env_id': env_id, 'agent_id': 0, 'job': copy.deepcopy(self._job)})
self._data_buffer[env_id].clear()
self._data_buffer[env_id].append(last)
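                # Hedged illustration: with data_push_length == 64 the buffer is cut
                # when it reaches 65 transitions; the first 64 are packed into one
                # trajectory and the 65th is kept as the seed of the next slice.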
if t.done:
self._job_result[env_id].append(t.info)
self._logger.info('ACTOR({}): env{} finish episode in {}'.format(self._actor_uid, env_id, time.time()))
cur_len = len(self._data_buffer[env_id])
miss_len = self._adder_kwargs['data_push_length'] - cur_len
data = self._last_data_buffer[env_id][-miss_len:] + self._data_buffer[env_id]
if self._adder_kwargs['use_gae']:
gamma = self._adder_kwargs['gamma']
gae_lambda = self._adder_kwargs['gae_lambda']
data = self._adder.get_gae(data, torch.zeros(1), gamma, gae_lambda)
self._traj_queue.put({'data': data, 'env_id': env_id, 'agent_id': 0, 'job': copy.deepcopy(self._job)})
self._last_data_buffer[env_id].clear()
self._data_buffer[env_id].clear()
# ******************************** thread **************************************
# override
def _update_agent(self) -> None:
last = time.time()
while not self._end_flag:
if hasattr(self, '_job') and hasattr(self, '_agent'):
cur = time.time()
interval = cur - last
if interval < self._agent_update_freq:
time.sleep(self._agent_update_freq * 0.1)
continue
else:
try:
for i in range(len(self._job['agent'])):
if i in self._job['update_agent']:
t = time.time()
agent_update_info, path = self.get_agent_update_info(self._job['learner_uid'][i])
self._model_deque[i].append(agent_update_info)
#if len(self._job['agent']) == 1:
# self._agent.load_state_dict(agent_update_info)
#else:
# self._agent[i].load_state_dict(agent_update_info)
self._logger.info(
'ACTOR({}): update agent {} with {} in {}, cost_time:{}'.format(self._actor_uid, i, path, time.time(), time.time() - t)
)
last = time.time()
except:
self._logger.error('update agent fail, try again!!!')
self._end_flag = True
time.sleep(0.1)
self._logger.info('update agent thread exit!!!!!!')
# override
def _pack_trajectory(self) -> None:
def _pack(element):
data, env_id, agent_id, job = list(element.values())
# send metadata
job_id = job['job_id']
traj_id = "job_{}_env_{}_agent_{}_{}".format(job_id, env_id, agent_id, str(uuid.uuid1()))
metadata = {
'traj_id': traj_id,
'learner_uid': job['learner_uid'][0],
'launch_player': job['launch_player'],
'env_id': env_id,
'agent_id': agent_id,
'actor_uid': self._actor_uid,
'done': data[-1]['done'],
# TODO(nyz) the relationship between traj priority and step priority
'priority': 1.0,
'traj_finish_time': time.time(),
'job_id': job_id,
'data_push_length': len(data),
'compressor': self._cfg.actor.compressor,
'job': job,
}
# save data
data = self._compressor(data)
t = time.time()
self.send_traj_stepdata(traj_id, data)
self.send_traj_metadata(metadata)
self._logger.info('ACTOR({}): send traj({}) in {}, cost time:{}'.format(self._actor_uid, traj_id, time.time(), time.time() - t))
finished_traj_num = 0
while not self._end_flag:
try:
element = self._traj_queue.get(timeout=1)
except queue.Empty:
time.sleep(1)
continue
_pack(element)
finished_traj_num += 1
self._logger.info('ACTOR({}) finished {}'.format(self._actor_uid, finished_traj_num))
self._logger.info('send traj thread exit!!!!!')
def _send_result_thread(self):
while not self._end_flag:
try:
result = self._result_queue.get(timeout=1)
except queue.Empty:
time.sleep(1)
continue
result_info = {
'job_id': self._job['job_id'],
'actor_uid': self._actor_uid,
'player_id': self._job['player_id'],
'launch_player': self._job['launch_player'],
'result': result['result'],
'dists': result['dists'],
'units_num': result['units_num'],
}
self.send_result(result_info)
self._logger.info('send result thread exit!!!!!!!')
def _setup_env_fn(self, env_cfg: dict) -> Callable:
"""set env_fn"""
raise NotImplementedError
def _setup_agent(self) -> BaseAgent:
"""set agent, load init state_dict, reset"""
raise NotImplementedError
def _get_transition(self, obs: Any, agent_output: Dict, timestep: namedtuple) -> dict:
"""get one step transition"""
raise NotImplementedError
|
__init__.py
|
"""Hermes MQTT server for Rhasspy wakeword with Porcupine"""
import asyncio
import logging
import queue
import socket
import struct
import threading
import typing
from pathlib import Path
from rhasspyhermes.audioserver import AudioFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.wake import (
GetHotwords,
Hotword,
HotwordDetected,
HotwordError,
Hotwords,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
WAV_HEADER_BYTES = 44
_LOGGER = logging.getLogger("rhasspywake_porcupine_hermes")
# -----------------------------------------------------------------------------
class WakeHermesMqtt(HermesClient):
"""Hermes MQTT server for Rhasspy wakeword with Porcupine."""
def __init__(
self,
client,
porcupine: typing.Any,
model_ids: typing.List[str],
wakeword_ids: typing.List[str],
sensitivities: typing.List[float],
keyword_dirs: typing.Optional[typing.List[Path]] = None,
site_ids: typing.Optional[typing.List[str]] = None,
enabled: bool = True,
sample_rate: int = 16000,
sample_width: int = 2,
channels: int = 1,
udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,
udp_chunk_size: int = 2048,
):
super().__init__(
"rhasspywake_porcupine_hermes",
client,
sample_rate=sample_rate,
sample_width=sample_width,
channels=channels,
site_ids=site_ids,
)
self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff, GetHotwords)
self.porcupine = porcupine
self.wakeword_ids = wakeword_ids
self.model_ids = model_ids
self.sensitivities = sensitivities
self.keyword_dirs = keyword_dirs or []
self.enabled = enabled
self.disabled_reasons: typing.Set[str] = set()
# Required audio format
self.sample_rate = sample_rate
self.sample_width = sample_width
self.channels = channels
# Queue of WAV audio chunks to process (plus site_id)
self.wav_queue: queue.Queue = queue.Queue()
self.chunk_size = self.porcupine.frame_length * 2
self.chunk_format = "h" * self.porcupine.frame_length
self.audio_buffer = bytes()
self.first_audio = True
# Start threads
threading.Thread(target=self.detection_thread_proc, daemon=True).start()
# Listen for raw audio on UDP too
self.udp_chunk_size = udp_chunk_size
if udp_audio:
for udp_host, udp_port, udp_site_id in udp_audio:
threading.Thread(
target=self.udp_thread_proc,
args=(udp_host, udp_port, udp_site_id),
daemon=True,
).start()
# -------------------------------------------------------------------------
async def handle_audio_frame(self, wav_bytes: bytes, site_id: str = "default"):
"""Process a single audio frame"""
self.wav_queue.put((wav_bytes, site_id))
async def handle_detection(
self, keyword_index: int, wakeword_id: str, site_id="default"
) -> typing.AsyncIterable[
typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]
]:
"""Handle a successful hotword detection"""
try:
assert (
len(self.model_ids) > keyword_index
), f"Missing {keyword_index} in models"
yield (
HotwordDetected(
site_id=site_id,
model_id=self.model_ids[keyword_index],
current_sensitivity=self.sensitivities[keyword_index],
model_version="",
model_type="personal",
),
{"wakeword_id": wakeword_id},
)
except Exception as e:
_LOGGER.exception("handle_detection")
yield HotwordError(
error=str(e), context=str(keyword_index), site_id=site_id
)
async def handle_get_hotwords(
self, get_hotwords: GetHotwords
) -> typing.AsyncIterable[typing.Union[Hotwords, HotwordError]]:
"""Report available hotwords"""
try:
if self.keyword_dirs:
# Add all models from keyword dir
model_paths = []
for keyword_dir in self.keyword_dirs:
if not keyword_dir.is_dir():
_LOGGER.warning("Missing keyword dir: %s", str(keyword_dir))
continue
for keyword_file in keyword_dir.glob("*.ppn"):
model_paths.append(keyword_file)
else:
# Add current model(s) only
model_paths = [Path(model_id) for model_id in self.model_ids]
models: typing.List[Hotword] = []
for ppn_file in model_paths:
words = ppn_file.with_suffix("").name.split("_")
if len(words) == 1:
# porcupine.ppn -> "porcupine"
model_words = words[0]
else:
# smart_mirror_linux.ppn -> "smart mirror"
model_words = " ".join(words[:-1])
models.append(Hotword(model_id=ppn_file.name, model_words=model_words))
yield Hotwords(
models=models, id=get_hotwords.id, site_id=get_hotwords.site_id
)
except Exception as e:
_LOGGER.exception("handle_get_hotwords")
yield HotwordError(
error=str(e), context=str(get_hotwords), site_id=get_hotwords.site_id
)
def detection_thread_proc(self):
"""Handle WAV audio chunks."""
try:
while True:
wav_bytes, site_id = self.wav_queue.get()
if self.first_audio:
_LOGGER.debug("Receiving audio")
self.first_audio = False
# Add to persistent buffer
audio_data = self.maybe_convert_wav(wav_bytes)
self.audio_buffer += audio_data
# Process in chunks.
# Any remaining audio data will be kept in buffer.
while len(self.audio_buffer) >= self.chunk_size:
chunk = self.audio_buffer[: self.chunk_size]
self.audio_buffer = self.audio_buffer[self.chunk_size :]
unpacked_chunk = struct.unpack_from(self.chunk_format, chunk)
keyword_index = self.porcupine.process(unpacked_chunk)
if keyword_index:
# Detection
if len(self.model_ids) == 1:
keyword_index = 0
if keyword_index < len(self.wakeword_ids):
wakeword_id = self.wakeword_ids[keyword_index]
else:
wakeword_id = ""
if not wakeword_id:
# Use file name
wakeword_id = Path(self.model_ids[keyword_index]).stem
asyncio.run_coroutine_threadsafe(
self.publish_all(
self.handle_detection(
keyword_index, wakeword_id, site_id=site_id
)
),
self.loop,
)
except Exception:
_LOGGER.exception("detection_thread_proc")
# -------------------------------------------------------------------------
def udp_thread_proc(self, host: str, port: int, site_id: str):
"""Handle WAV chunks from UDP socket."""
try:
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.bind((host, port))
_LOGGER.debug("Listening for audio on UDP %s:%s", host, port)
while True:
wav_bytes, _ = udp_socket.recvfrom(
self.udp_chunk_size + WAV_HEADER_BYTES
)
if self.enabled:
self.wav_queue.put((wav_bytes, site_id))
except Exception:
_LOGGER.exception("udp_thread_proc")
# -------------------------------------------------------------------------
async def on_message_blocking(
self,
message: Message,
site_id: typing.Optional[str] = None,
session_id: typing.Optional[str] = None,
topic: typing.Optional[str] = None,
) -> GeneratorType:
"""Received message from MQTT broker."""
# Check enable/disable messages
if isinstance(message, HotwordToggleOn):
if message.reason == HotwordToggleReason.UNKNOWN:
# Always enable on unknown
self.disabled_reasons.clear()
else:
self.disabled_reasons.discard(message.reason)
if self.disabled_reasons:
_LOGGER.debug("Still disabled: %s", self.disabled_reasons)
else:
self.enabled = True
self.first_audio = True
_LOGGER.debug("Enabled")
elif isinstance(message, HotwordToggleOff):
self.enabled = False
self.disabled_reasons.add(message.reason)
_LOGGER.debug("Disabled")
elif isinstance(message, AudioFrame):
if self.enabled:
assert site_id, "Missing site_id"
await self.handle_audio_frame(message.wav_bytes, site_id=site_id)
elif isinstance(message, GetHotwords):
async for hotword_result in self.handle_get_hotwords(message):
yield hotword_result
|
Client.py
|
import socket
import threading
import common
from config import logs as log
from config.config import *
from models.Message import *
class Client:
client = None
address = None
session_id = None
# You can add any variable which you want here
def __init__(self, c, addr, session_id):
self.client = c
self.address = addr
self.session_id = session_id
print(log.new_connection + str(addr) + "-session_id:" + session_id + " ." + get_time())
threading.Thread(target=self.run, args=[]).start()
def run(self):
while True:
try:
data = self.client.recv(1024)  # Receive a message sent from the client
if not data:
self.client.close()
else:
print(log.new_message + str(
self.address) + "-session_id:" + self.session_id + " -message:" + data.decode())
common.send_to_all(Message("title", "con", "url"))
except socket.error as error_msg:
self.client.close()
for index, x in enumerate(common.clients):
if x.address == self.address:
common.clients.pop(index)
print(log.close_connection + str(self.address) + "." + get_time())
break
def send(self, data):
self.client.send(data)  # Send data to the client
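# Illustrative usage sketch (added; not part of the original file). Assumptions: a
# server accept loop elsewhere creates Client instances and appends them to
# common.clients; the host/port below are hypothetical.
#
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind(("0.0.0.0", 5000))
#   server.listen(5)
#   conn, addr = server.accept()
#   common.clients.append(Client(conn, addr, session_id="1"))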
|
decorator.py
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
'ComposeNotAligned', 'firstn', 'xmap_readers', 'PipeReader',
'multiprocess_reader', 'Fake'
]
from threading import Thread
import subprocess
import multiprocessing
import sys
from six.moves.queue import Queue
from six.moves import zip_longest
from six.moves import map
from six.moves import zip
import itertools
import random
import zlib
import paddle.compat as cpt
def map_readers(func, *readers):
"""
Creates a data reader that outputs the return value of a function, using
the output of each data reader as its arguments.
:param func: function to use. The type of func should be (Sample) => Sample
:type: callable
:param readers: readers whose outputs will be used as arguments of func.
:return: the created data reader.
:rtype: callable
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in map(func, *rs):
yield e
return reader
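# Illustrative usage sketch (added; not part of the original module): shows how
# map_readers applies a function to the items of a reader. The helper names below
# are hypothetical.
def _example_map_readers():
    def square(x):
        return x * x

    def ints():  # a minimal reader: a callable returning an iterator
        for i in range(4):
            yield i

    squared = map_readers(square, ints)
    return list(squared())  # -> [0, 1, 4, 9]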
def shuffle(reader, buf_size):
"""
Creates a data reader whose data output is shuffled.
Output from the iterator created by the original reader will be
buffered into a shuffle buffer and then shuffled. The size of the
shuffle buffer is determined by the argument buf_size.
:param reader: the original reader whose output will be shuffled.
:type reader: callable
:param buf_size: shuffle buffer size.
:type buf_size: int
:return: the new reader whose output is shuffled.
:rtype: callable
"""
def data_reader():
buf = []
for e in reader():
buf.append(e)
if len(buf) >= buf_size:
random.shuffle(buf)
for b in buf:
yield b
buf = []
if len(buf) > 0:
random.shuffle(buf)
for b in buf:
yield b
return data_reader
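# Illustrative usage sketch (added; not part of the original module): shows that
# shuffle keeps all items but reorders them within buffers of buf_size. Names are
# hypothetical.
def _example_shuffle():
    def ints():
        for i in range(10):
            yield i

    shuffled = shuffle(ints, buf_size=4)
    return sorted(shuffled())  # all 10 items are still present -> [0, 1, ..., 9]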
def chain(*readers):
"""
Creates a data reader whose output is the concatenation of the outputs
of the input data readers.
If the input readers output the following data entries:
[0, 0, 0]
[1, 1, 1]
[2, 2, 2]
The chained reader will output:
[0, 0, 0, 1, 1, 1, 2, 2, 2]
:param readers: input readers.
:return: the new data reader.
:rtype: callable
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in itertools.chain(*rs):
yield e
return reader
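# Illustrative usage sketch (added; not part of the original module): mirrors the
# docstring above with two small readers. Names are hypothetical.
def _example_chain():
    def zeros():
        for x in [0, 0, 0]:
            yield x

    def ones():
        for x in [1, 1, 1]:
            yield x

    chained = chain(zeros, ones)
    return list(chained())  # -> [0, 0, 0, 1, 1, 1]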
class ComposeNotAligned(ValueError):
pass
def compose(*readers, **kwargs):
"""
Creates a data reader whose output is the combination of input readers.
If the input readers output the following data entries:
(1, 2) 3 (4, 5)
The composed reader will output:
(1, 2, 3, 4, 5)
:param readers: readers that will be composed together.
:param check_alignment: if True, will check if input readers are aligned
correctly. If False, will not check alignment and trailing outputs
will be discarded. Defaults to True.
:type check_alignment: bool
:return: the new data reader.
:raises ComposeNotAligned: outputs of readers are not aligned.
Will not raise when check_alignment is set to False.
"""
check_alignment = kwargs.pop('check_alignment', True)
def make_tuple(x):
if isinstance(x, tuple):
return x
else:
return (x, )
def reader():
rs = []
for r in readers:
rs.append(r())
if not check_alignment:
for outputs in zip(*rs):
yield sum(list(map(make_tuple, outputs)), ())
else:
for outputs in zip_longest(*rs):
for o in outputs:
if o is None:
# None will not be present if compose is aligned
raise ComposeNotAligned(
"outputs of readers are not aligned.")
yield sum(list(map(make_tuple, outputs)), ())
return reader
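# Illustrative usage sketch (added; not part of the original module): shows how
# compose flattens the per-item outputs of several aligned readers. Names are
# hypothetical.
def _example_compose():
    def pairs():
        for i in range(3):
            yield (i, i + 1)

    def tens():
        for i in range(3):
            yield i * 10

    combined = compose(pairs, tens)
    return list(combined())  # -> [(0, 1, 0), (1, 2, 10), (2, 3, 20)]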
def buffered(reader, size):
"""
Creates a buffered data reader.
The buffered data reader will read and save data entries into a
buffer. Reading from the buffered data reader will proceed as long
as the buffer is not empty.
:param reader: the data reader to read from.
:type reader: callable
:param size: max buffer size.
:type size: int
:returns: the buffered data reader.
"""
class EndSignal():
pass
end = EndSignal()
def read_worker(r, q):
for d in r:
q.put(d)
q.put(end)
def data_reader():
r = reader()
q = Queue(maxsize=size)
t = Thread(
target=read_worker, args=(
r,
q, ))
t.daemon = True
t.start()
e = q.get()
while e != end:
yield e
e = q.get()
return data_reader
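# Illustrative usage sketch (added; not part of the original module): the buffered
# reader prefetches items on a background thread; the output order is unchanged.
# Names are hypothetical.
def _example_buffered():
    def ints():
        for i in range(5):
            yield i

    buffered_ints = buffered(ints, size=2)
    return list(buffered_ints())  # -> [0, 1, 2, 3, 4]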
def firstn(reader, n):
"""
Limit the max number of samples that the reader can return.
:param reader: the data reader to read from.
:type reader: callable
:param n: the max number of samples to return.
:type n: int
:return: the decorated reader.
:rtype: callable
"""
# TODO(yuyang18): Check whether simply dropping the reader would clean up the
# opened resources or not.
def firstn_reader():
for i, item in enumerate(reader()):
if i == n:
break
yield item
return firstn_reader
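# Illustrative usage sketch (added; not part of the original module): firstn
# truncates a reader after n samples. Names are hypothetical.
def _example_firstn():
    def ints():
        for i in range(100):
            yield i

    return list(firstn(ints, 3)())  # -> [0, 1, 2]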
class XmapEndSignal():
pass
def xmap_readers(mapper, reader, process_num, buffer_size, order=False):
"""
Use multiple threads to map samples from the reader with a user-defined mapper.
Args:
mapper (callable): a function to map the data from reader.
reader (callable): a data reader which yields the data.
process_num (int): number of threads used to handle the original samples.
buffer_size (int): size of the queue to read data in.
order (bool): whether to keep the data order from original reader.
Default False.
Returns:
callable: a decorated reader with data mapping.
"""
end = XmapEndSignal()
# define a worker to read samples from reader to in_queue
def read_worker(reader, in_queue):
for i in reader():
in_queue.put(i)
in_queue.put(end)
# define a worker to read samples from reader to in_queue with order flag
def order_read_worker(reader, in_queue):
in_order = 0
for i in reader():
in_queue.put((in_order, i))
in_order += 1
in_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue
def handle_worker(in_queue, out_queue, mapper):
sample = in_queue.get()
while not isinstance(sample, XmapEndSignal):
r = mapper(sample)
out_queue.put(r)
sample = in_queue.get()
in_queue.put(end)
out_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue by order
def order_handle_worker(in_queue, out_queue, mapper, out_order):
ins = in_queue.get()
while not isinstance(ins, XmapEndSignal):
order, sample = ins
r = mapper(sample)
while order != out_order[0]:
pass
out_queue.put(r)
out_order[0] += 1
ins = in_queue.get()
in_queue.put(end)
out_queue.put(end)
def xreader():
in_queue = Queue(buffer_size)
out_queue = Queue(buffer_size)
out_order = [0]
# start a read worker in a thread
target = order_read_worker if order else read_worker
t = Thread(target=target, args=(reader, in_queue))
t.daemon = True
t.start()
# start several handle_workers
target = order_handle_worker if order else handle_worker
args = (in_queue, out_queue, mapper, out_order) if order else (
in_queue, out_queue, mapper)
workers = []
for i in range(process_num):
worker = Thread(target=target, args=args)
worker.daemon = True
workers.append(worker)
for w in workers:
w.start()
sample = out_queue.get()
while not isinstance(sample, XmapEndSignal):
yield sample
sample = out_queue.get()
finish = 1
while finish < process_num:
sample = out_queue.get()
if isinstance(sample, XmapEndSignal):
finish += 1
else:
yield sample
return xreader
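# Illustrative usage sketch (added; not part of the original module): maps samples
# with two worker threads while preserving the input order (order=True). Names are
# hypothetical.
def _example_xmap_readers():
    def ints():
        for i in range(8):
            yield i

    doubled = xmap_readers(lambda x: 2 * x, ints, process_num=2, buffer_size=4, order=True)
    return list(doubled())  # -> [0, 2, 4, 6, 8, 10, 12, 14]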
def multiprocess_reader(readers, use_pipe=True, queue_size=1000):
"""
multiprocess_reader uses Python multiprocessing to read data from the readers
and then uses multiprocessing.Queue or multiprocessing.Pipe to merge all the
data. The number of processes equals the number of input readers; each
process calls one reader.
multiprocessing.Queue requires read/write access to /dev/shm, which some
platforms do not support.
You need to create multiple readers first; these readers should be independent
of each other so that each process can work independently.
An example:
.. code-block:: python
reader0 = reader(["file01", "file02"])
reader1 = reader(["file11", "file12"])
reader1 = reader(["file21", "file22"])
reader = multiprocess_reader([reader0, reader1, reader2],
queue_size=100, use_pipe=False)
"""
try:
import ujson as json
except Exception as e:
sys.stderr.write("import ujson error: " + str(e) + " use json\n")
import json
assert type(readers) is list and len(readers) > 0
def _read_into_queue(reader, queue):
for sample in reader():
if sample is None:
raise ValueError("sample has None")
queue.put(sample)
queue.put(None)
def queue_reader():
queue = multiprocessing.Queue(queue_size)
for reader in readers:
p = multiprocessing.Process(
target=_read_into_queue, args=(reader, queue))
p.start()
reader_num = len(readers)
finish_num = 0
while finish_num < reader_num:
sample = queue.get()
if sample is None:
finish_num += 1
else:
yield sample
def _read_into_pipe(reader, conn):
for sample in reader():
if sample is None:
raise ValueError("sample has None!")
conn.send(json.dumps(sample))
conn.send(json.dumps(None))
conn.close()
def pipe_reader():
conns = []
for reader in readers:
parent_conn, child_conn = multiprocessing.Pipe()
conns.append(parent_conn)
p = multiprocessing.Process(
target=_read_into_pipe, args=(reader, child_conn))
p.start()
reader_num = len(readers)
finish_num = 0
conn_to_remove = []
while finish_num < reader_num:
for conn in conn_to_remove:
conns.remove(conn)
conn_to_remove = []
for conn in conns:
sample = json.loads(conn.recv())
if sample is None:
finish_num += 1
conn.close()
conn_to_remove.append(conn)
else:
yield sample
if use_pipe:
return pipe_reader
else:
return queue_reader
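# Usage note (added; not from the original module): with the "spawn" start method
# (the default on Windows and recent macOS), the reader callables passed to
# multiprocess_reader must be picklable, i.e. defined at module top level, and the
# call should happen under an ``if __name__ == "__main__":`` guard so that the child
# processes can be started safely.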
def _buf2lines(buf, line_break="\n"):
# FIXME: line_break should be automatically configured.
lines = buf.split(line_break)
return lines[:-1], lines[-1]
class PipeReader:
"""
PipeReader reads data as a stream from a command, takes its
stdout into a pipe buffer and redirects it to the parser,
then yields data in your desired format.
You can use a standard Linux command or call another program
to read data from HDFS, Ceph, a URL, AWS S3, etc.:
.. code-block:: python
cmd = "hadoop fs -cat /path/to/some/file"
cmd = "cat sample_file.tar.gz"
cmd = "curl http://someurl"
cmd = "python print_s3_bucket.py"
An example:
.. code-block:: python
def example_reader():
for f in myfiles:
pr = PipeReader("cat %s"%f)
for l in pr.get_line():
sample = l.split(" ")
yield sample
"""
def __init__(self, command, bufsize=8192, file_type="plain"):
if not isinstance(command, str):
raise TypeError("left_cmd must be a string")
if file_type == "gzip":
self.dec = zlib.decompressobj(
32 + zlib.MAX_WBITS) # offset 32 to skip the header
self.file_type = file_type
self.bufsize = bufsize
self.process = subprocess.Popen(
command.split(" "), bufsize=bufsize, stdout=subprocess.PIPE)
def get_line(self, cut_lines=True, line_break="\n"):
"""
:param cut_lines: cut buffer to lines
:type cut_lines: bool
:param line_break: line break of the file, like '\\\\n' or '\\\\r'
:type line_break: string
:return: one line or a buffer of bytes
:rtype: string
"""
remained = ""
while True:
buff = self.process.stdout.read(self.bufsize)
if buff:
if self.file_type == "gzip":
decomp_buff = cpt.to_text(self.dec.decompress(buff))
elif self.file_type == "plain":
decomp_buff = cpt.to_text(buff)
else:
raise TypeError("file_type %s is not allowed" %
self.file_type)
if cut_lines:
lines, remained = _buf2lines(''.join(
[remained, decomp_buff]), line_break)
for line in lines:
yield line
else:
yield decomp_buff
else:
break
class Fake(object):
"""
Fake reader caches the first piece of data it reads and yields it data_num times.
It is used to cache data from a real reader and reuse it for speed testing.
:param reader: the origin reader
:param data_num: times that this reader will yield data.
:return: a fake reader.
Examples:
.. code-block:: python
def reader():
for i in range(10):
yield i
fake_reader = Fake()(reader, 100)
"""
def __init__(self):
self.data = None
self.yield_num = 0
def __call__(self, reader, data_num):
def fake_reader():
if self.data is None:
self.data = next(reader())
while self.yield_num < data_num:
yield self.data
self.yield_num += 1
self.yield_num = 0
return fake_reader
|
test_waitable.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import unittest
import rclpy
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup, ReentrantCallbackGroup
from rclpy.clock import Clock
from rclpy.clock import ClockType
from rclpy.executors import SingleThreadedExecutor
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
from rclpy.node import check_for_type_support
from rclpy.qos import QoSProfile
from rclpy.task import Future
from rclpy.waitable import NumberOfEntities
from rclpy.waitable import Waitable
from test_msgs.msg import Empty as EmptyMsg
from test_msgs.srv import Empty as EmptySrv
check_for_type_support(EmptyMsg)
check_for_type_support(EmptySrv)
class ClientWaitable(Waitable):
def __init__(self, node):
super().__init__(ReentrantCallbackGroup())
with node.handle as node_capsule:
self.client = _rclpy.rclpy_create_client(
node_capsule, EmptySrv, 'test_client', QoSProfile(depth=10).get_c_qos_profile())
self.client_index = None
self.client_is_ready = False
self.node = node
self.future = None
def is_ready(self, wait_set):
"""Return True if entities are ready in the wait set."""
if _rclpy.rclpy_wait_set_is_ready('client', wait_set, self.client_index):
self.client_is_ready = True
return self.client_is_ready
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
if self.client_is_ready:
self.client_is_ready = False
return _rclpy.rclpy_take_response(self.client, EmptySrv.Response)
return None
async def execute(self, taken_data):
"""Execute work after data has been taken from a ready wait set."""
test_data = {}
if isinstance(taken_data[1], EmptySrv.Response):
test_data['client'] = taken_data[1]
self.future.set_result(test_data)
def get_num_entities(self):
"""Return number of each type of entity used."""
return NumberOfEntities(0, 0, 0, 1, 0)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self.client_index = _rclpy.rclpy_wait_set_add_entity('client', wait_set, self.client)
class ServerWaitable(Waitable):
def __init__(self, node):
super().__init__(ReentrantCallbackGroup())
with node.handle as node_capsule:
self.server = _rclpy.rclpy_create_service(
node_capsule, EmptySrv, 'test_server', QoSProfile(depth=10).get_c_qos_profile())
self.server_index = None
self.server_is_ready = False
self.node = node
self.future = None
def is_ready(self, wait_set):
"""Return True if entities are ready in the wait set."""
if _rclpy.rclpy_wait_set_is_ready('service', wait_set, self.server_index):
self.server_is_ready = True
return self.server_is_ready
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
if self.server_is_ready:
self.server_is_ready = False
return _rclpy.rclpy_take_request(self.server, EmptySrv.Request)
return None
async def execute(self, taken_data):
"""Execute work after data has been taken from a ready wait set."""
test_data = {}
if isinstance(taken_data[0], EmptySrv.Request):
test_data['server'] = taken_data[0]
self.future.set_result(test_data)
def get_num_entities(self):
"""Return number of each type of entity used."""
return NumberOfEntities(0, 0, 0, 0, 1)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self.server_index = _rclpy.rclpy_wait_set_add_entity('service', wait_set, self.server)
class TimerWaitable(Waitable):
def __init__(self, node):
super().__init__(ReentrantCallbackGroup())
self._clock = Clock(clock_type=ClockType.STEADY_TIME)
period_nanoseconds = 10000
with self._clock.handle as clock_capsule, node.context.handle as context_capsule:
self.timer = _rclpy.rclpy_create_timer(
clock_capsule, context_capsule, period_nanoseconds)
self.timer_index = None
self.timer_is_ready = False
self.node = node
self.future = None
def is_ready(self, wait_set):
"""Return True if entities are ready in the wait set."""
if _rclpy.rclpy_wait_set_is_ready('timer', wait_set, self.timer_index):
self.timer_is_ready = True
return self.timer_is_ready
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
if self.timer_is_ready:
self.timer_is_ready = False
_rclpy.rclpy_call_timer(self.timer)
return 'timer'
return None
async def execute(self, taken_data):
"""Execute work after data has been taken from a ready wait set."""
test_data = {}
if 'timer' == taken_data:
test_data['timer'] = taken_data
self.future.set_result(test_data)
def get_num_entities(self):
"""Return number of each type of entity used."""
return NumberOfEntities(0, 0, 1, 0, 0)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self.timer_index = _rclpy.rclpy_wait_set_add_entity('timer', wait_set, self.timer)
class SubscriptionWaitable(Waitable):
def __init__(self, node):
super().__init__(ReentrantCallbackGroup())
with node.handle as node_capsule:
self.subscription = _rclpy.rclpy_create_subscription(
node_capsule, EmptyMsg, 'test_topic', QoSProfile(depth=10).get_c_qos_profile())
self.subscription_index = None
self.subscription_is_ready = False
self.node = node
self.future = None
def is_ready(self, wait_set):
"""Return True if entities are ready in the wait set."""
if _rclpy.rclpy_wait_set_is_ready('subscription', wait_set, self.subscription_index):
self.subscription_is_ready = True
return self.subscription_is_ready
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
if self.subscription_is_ready:
self.subscription_is_ready = False
msg_info = _rclpy.rclpy_take(self.subscription, EmptyMsg, False)
if msg_info is not None:
return msg_info[0]
return None
async def execute(self, taken_data):
"""Execute work after data has been taken from a ready wait set."""
test_data = {}
if isinstance(taken_data, EmptyMsg):
test_data['subscription'] = taken_data
self.future.set_result(test_data)
def get_num_entities(self):
"""Return number of each type of entity used."""
return NumberOfEntities(1, 0, 0, 0, 0)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self.subscription_index = _rclpy.rclpy_wait_set_add_entity(
'subscription', wait_set, self.subscription)
class GuardConditionWaitable(Waitable):
def __init__(self, node):
super().__init__(ReentrantCallbackGroup())
with node.context.handle as context_capsule:
self.guard_condition = _rclpy.rclpy_create_guard_condition(context_capsule)
self.guard_condition_index = None
self.guard_is_ready = False
self.node = node
self.future = None
def is_ready(self, wait_set):
"""Return True if entities are ready in the wait set."""
if _rclpy.rclpy_wait_set_is_ready('guard_condition', wait_set, self.guard_condition_index):
self.guard_is_ready = True
return self.guard_is_ready
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
if self.guard_is_ready:
self.guard_is_ready = False
return 'guard_condition'
return None
async def execute(self, taken_data):
"""Execute work after data has been taken from a ready wait set."""
test_data = {}
if 'guard_condition' == taken_data:
test_data['guard_condition'] = True
self.future.set_result(test_data)
def get_num_entities(self):
"""Return number of each type of entity used."""
return NumberOfEntities(0, 1, 0, 0, 0)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self.guard_condition_index = _rclpy.rclpy_wait_set_add_entity(
'guard_condition', wait_set, self.guard_condition)
class MutuallyExclusiveWaitable(Waitable):
def __init__(self):
super().__init__(MutuallyExclusiveCallbackGroup())
def is_ready(self, wait_set):
return False
def take_data(self):
return None
async def execute(self, taken_data):
pass
def get_num_entities(self):
return NumberOfEntities(0, 0, 0, 0, 0)
def add_to_wait_set(self, wait_set):
pass
class TestWaitable(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.context = rclpy.context.Context()
rclpy.init(context=cls.context)
cls.node = rclpy.create_node(
'TestWaitable', namespace='/rclpy/test', context=cls.context,
allow_undeclared_parameters=True)
cls.executor = SingleThreadedExecutor(context=cls.context)
cls.executor.add_node(cls.node)
@classmethod
def tearDownClass(cls):
cls.executor.shutdown()
cls.node.destroy_node()
rclpy.shutdown(context=cls.context)
def start_spin_thread(self, waitable):
waitable.future = Future(executor=self.executor)
self.thr = threading.Thread(
target=self.executor.spin_until_future_complete, args=(waitable.future,), daemon=True)
self.thr.start()
return self.thr
def setUp(self):
pass
def tearDown(self):
self.node.remove_waitable(self.waitable)
# Ensure resources inside the waitable are destroyed before the node in tearDownClass
del self.waitable
def test_waitable_with_client(self):
self.waitable = ClientWaitable(self.node)
self.node.add_waitable(self.waitable)
server = self.node.create_service(EmptySrv, 'test_client', lambda req, resp: resp)
while not _rclpy.rclpy_service_server_is_available(self.waitable.client):
time.sleep(0.1)
thr = self.start_spin_thread(self.waitable)
_rclpy.rclpy_send_request(self.waitable.client, EmptySrv.Request())
thr.join()
assert self.waitable.future.done()
assert isinstance(self.waitable.future.result()['client'], EmptySrv.Response)
self.node.destroy_service(server)
def test_waitable_with_server(self):
self.waitable = ServerWaitable(self.node)
self.node.add_waitable(self.waitable)
client = self.node.create_client(EmptySrv, 'test_server')
thr = self.start_spin_thread(self.waitable)
client.call_async(EmptySrv.Request())
thr.join()
assert self.waitable.future.done()
assert isinstance(self.waitable.future.result()['server'], EmptySrv.Request)
self.node.destroy_client(client)
def test_waitable_with_timer(self):
self.waitable = TimerWaitable(self.node)
self.node.add_waitable(self.waitable)
thr = self.start_spin_thread(self.waitable)
thr.join()
assert self.waitable.future.done()
assert self.waitable.future.result()['timer']
def test_waitable_with_subscription(self):
self.waitable = SubscriptionWaitable(self.node)
self.node.add_waitable(self.waitable)
pub = self.node.create_publisher(EmptyMsg, 'test_topic', 1)
thr = self.start_spin_thread(self.waitable)
pub.publish(EmptyMsg())
thr.join()
assert self.waitable.future.done()
assert isinstance(self.waitable.future.result()['subscription'], EmptyMsg)
self.node.destroy_publisher(pub)
def test_waitable_with_guard_condition(self):
self.waitable = GuardConditionWaitable(self.node)
self.node.add_waitable(self.waitable)
thr = self.start_spin_thread(self.waitable)
_rclpy.rclpy_trigger_guard_condition(self.waitable.guard_condition)
thr.join()
assert self.waitable.future.done()
assert self.waitable.future.result()['guard_condition']
# Test that waitable doesn't crash with MutuallyExclusiveCallbackGroup
# https://github.com/ros2/rclpy/issues/264
def test_waitable_with_mutually_exclusive_callback_group(self):
self.waitable = MutuallyExclusiveWaitable()
self.node.add_waitable(self.waitable)
self.executor.spin_once(timeout_sec=0.1)
class TestNumberOfEntities(unittest.TestCase):
def test_add(self):
n1 = NumberOfEntities(1, 2, 3, 4, 5, 6)
n2 = NumberOfEntities(10, 20, 30, 40, 50, 60)
n = n1 + n2
assert n.num_subscriptions == 11
assert n.num_guard_conditions == 22
assert n.num_timers == 33
assert n.num_clients == 44
assert n.num_services == 55
assert n.num_events == 66
def test_add_assign(self):
n1 = NumberOfEntities(1, 2, 3, 4, 5, 6)
n2 = NumberOfEntities(10, 20, 30, 40, 50, 60)
n1 += n2
assert n1.num_subscriptions == 11
assert n1.num_guard_conditions == 22
assert n1.num_timers == 33
assert n1.num_clients == 44
assert n1.num_services == 55
assert n1.num_events == 66
|
sound.py
|
import RPi.GPIO as GPIO
import subprocess
import time
from os import listdir
import threading
'''
SCRIPT LOGIC:
When a button is pressed, do the following:
Play a sound for 20 minutes.
If a sound is already playing, play the next sound in the directory.
If at end of sound list, stop playing.
'''
'''
CONSTANTS
'''
# Directory where all the sounds we play can be found
SOUNDS_DIR = "/home/pi/rasp-sound-machine/sounds"
# Name of the app used to play the sound
SOUND_ARGS = ["aplay"]
# The number of seconds to play a sound for (20 minutes)
SECONDS_TO_PLAY = 1200
# The GPIO PIN we're listening to
GPIO_PIN = 4
'''
INITIAL SETUP
'''
# Configure GPIO PIN for input
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Get an array of all the sound files
files = listdir(SOUNDS_DIR)
# Get the total number of files
totalFiles = len(files)
# Keeps track of what file in the list we're playing
curFilePos = 0
# Keeps track of the subprocess.Popen handle for the currently playing sound (0 until a sound has started)
curPID = 0
# Our method that plays back a sound in a separate thread
def playback(audioFile):
args = list(SOUND_ARGS)
args.append(audioFile)
global curPID
# Make note of when playback started
startTime = time.time()
while True:
# Start playback
curPID = subprocess.Popen(args)
print("PID: %d" % curPID.pid)
# Wait for sound to finish playing
curPID.wait()
# Check to see if we were killed, if so, exit the loop
if curPID.returncode == -9:
break
# Check if we should start playback, or stop
curTime = time.time()
if curTime > startTime + SECONDS_TO_PLAY:
break
'''
LOOP, WAITING FOR BUTTON PRESS
'''
while True:
input_state = GPIO.input(GPIO_PIN)
if not input_state:
print("Button Pressed")
# If this is the last file, kill the sound, reset our count and restart the loop
if curFilePos == totalFiles:
print("Killing sound and stopping playback")
curPID.kill()
curFilePos = 0
# Sleep to ensure button isn't clicked too quickly
time.sleep(.5)
continue
# Kill the old sound if it's playing
if curPID != 0:
curPID.kill()
# Play back audio in a separate thread
playThread = threading.Thread(target=playback, args=(SOUNDS_DIR + "/" + files[curFilePos],))
playThread.start()
# Increment the current file position
curFilePos += 1
# Sleep to ensure button isn't clicked too quickly
time.sleep(.5)
|
slurm.py
|
"""
DMLC submission script, SLURM version
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import subprocess, logging
from threading import Thread
from . import tracker
def get_mpi_env(envs):
"""get the slurm command for setting the environment
"""
cmd = ''
for k, v in envs.items():
cmd += '%s=%s ' % (k, str(v))
return cmd
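# Illustrative example (added; not part of the original script): get_mpi_env builds
# the environment prefix that is prepended to the srun command line, e.g.
#   get_mpi_env({'DMLC_ROLE': 'worker', 'DMLC_NUM_WORKER': 2})
#   -> "DMLC_ROLE=worker DMLC_NUM_WORKER=2 "   (key order may vary)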
def submit(args):
"""Submission script with SLURM."""
def mpi_submit(nworker, nserver, pass_envs):
"""Internal closure for job submission."""
def run(prog):
"""run the program"""
subprocess.check_call(prog, shell=True)
cmd = ' '.join(args.command)
pass_envs['DMLC_JOB_CLUSTER'] = 'slurm'
if args.slurm_worker_nodes is None:
nworker_nodes = nworker
else:
nworker_nodes=args.slurm_worker_nodes
# start workers
if nworker > 0:
logging.info('Start %d workers by srun' % nworker)
pass_envs['DMLC_ROLE'] = 'worker'
prog = '%s srun --share --exclusive=user -N %d -n %d %s' % (get_mpi_env(pass_envs), nworker_nodes, nworker, cmd)
thread = Thread(target=run, args=(prog,))
thread.daemon = True
thread.start()
if args.slurm_server_nodes is None:
nserver_nodes = nserver
else:
nserver_nodes=args.slurm_server_nodes
# start servers
if nserver > 0:
logging.info('Start %d servers by srun' % nserver)
pass_envs['DMLC_ROLE'] = 'server'
prog = '%s srun --share --exclusive=user -N %d -n %d %s' % (get_mpi_env(pass_envs), nserver_nodes, nserver, cmd)
thread = Thread(target=run, args=(prog,))
thread.daemon = True
thread.start()
tracker.submit(args.num_workers, args.num_servers,
fun_submit=mpi_submit,
pscmd=(' '.join(args.command)))
|
main.py
|
"""\
Main wxGlade module: defines wxGladeFrame which contains the buttons to add
widgets and initializes all the stuff (tree, frame_property, etc.)
@copyright: 2002-2007 Alberto Griggio
@copyright: 2011-2016 Carsten Grohmann
@copyright: 2016-2021 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
# import general python modules
import logging, os, os.path, sys, math, time, functools
import wx
from xml.sax import SAXParseException
# import project modules
import application
import common, config, compat, misc, history
import new_properties as np
import preferencesdialog, msgdialog, bugdialog, about
import log
import template
from tree import WidgetTree
from xml_parse import XmlWidgetBuilder, ProgressXmlWidgetBuilder, XmlParsingError
class FileDropTarget(wx.FileDropTarget):
# file drop target that checks first whether the property panel is the target
def __init__(self, parent):
wx.FileDropTarget.__init__(self)
self.parent = parent
if config.debugging:
def OnDragOver(self, x, y, defResult):
x0,y0 = self.parent.GetClientAreaOrigin()
screen_xy = self.parent.ClientToScreen( (x-x0,y-y0) )
ctrl = wx.FindWindowAtPoint( screen_xy )
print("DragOver", x0,y0, x-x0,y-y0, ctrl)
return wx.FileDropTarget.OnDragOver(self, x,y, defResult)
def OnDropFiles(self, x, y, filenames):
if len(filenames) > 1:
wx.MessageBox( _("Please only drop one file at a time"), "wxGlade", wx.ICON_ERROR )
return False
if not filenames or not os.path.exists(filenames[0]): return False
# find control under drop point; this does also work if a dialog is open
x0,y0 = self.parent.GetClientAreaOrigin()
screen_xy = self.parent.ClientToScreen( (x-x0,y-y0) )
ctrl = c = wx.FindWindowAtPoint( screen_xy )
# go up the hierarchy and find a widget with an 'on_drop_files' method
while c:
if hasattr(c, "on_drop_files"):
handled = c.on_drop_files(screen_xy, ctrl, filenames)
if handled: return True
if c is self.parent.property_panel: break
if isinstance(c, wx.Dialog): return False # when a dialog is open, don't open wxg or xrc files
c = c.GetParent()
# not handled by a control; try to open .wxg or .XRC file
if not self.parent.ask_save(): return False
path = filenames[0]
if os.path.splitext(path)[1].upper() == ".XRC":
self.parent.import_xrc(path, ask_save=False)
else:
self.parent._open_app(path)
self.parent.cur_dir = os.path.dirname(path)
return True
class wxGladePropertyPanel(wx.Panel):
"Panel used to display the Properties of the various widgets"
def __init__(self, parent):
wx.Panel.__init__( self, parent, -1, name='PropertyPanel' )
self.SetBackgroundColour( compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE) )
self.current_widget = None # instance currently being edited
self.next_widget = None # the next one, will only be edited after a small delay
self.pagenames = None
sizer = wx.BoxSizer(wx.VERTICAL)
self.heading = wx.TextCtrl(self, style=wx.TE_READONLY)
sizer.Add(self.heading, 0, wx.EXPAND, 0)
self.notebook = wx.Notebook(self)
self.notebook.Bind(wx.EVT_SIZE, self.on_notebook_size)
sizer.Add(self.notebook, 1, wx.EXPAND, 0)
# for GTK3: add a panel to determine page size
p = wx.Panel(self.notebook)
self.notebook.AddPage(p, "panel")
self._notebook_decoration_size = None
p.Bind(wx.EVT_SIZE, self.on_panel_size)
self.SetSizer(sizer)
self.Layout()
def on_drop_files(self, screen_xy, ctrl, filenames):
if not self.current_widget: return False
for p_name in self.current_widget.PROPERTIES:
if p_name[0].isupper(): continue
prop = self.current_widget.properties.get(p_name)
if not prop or not hasattr(prop, "on_drop_file"): continue
if ( hasattr(prop, "label_ctrl") and prop.label_ctrl.ScreenRect.Contains( screen_xy ) and
prop.label_ctrl.IsShownOnScreen() ) or prop.has_control(ctrl):
return prop.on_drop_file(filenames[0])
return False
####################################################################################################################
# new editor interface
def set_widget(self, widget, force=False):
if widget is self.current_widget and not force:
# just update
return
self.next_widget = widget
if self.current_widget:
# this might not be executed if there was an error during creation of the property editors
for editor in self.current_widget.properties.values():
editor.destroy_editor()
self.current_widget = None # delete the reference
wx.CallLater( 150, self.edit_properties, widget )
def edit_properties(self, edit_widget):
# this will be called with a delay
if edit_widget is not self.next_widget:
# wait for another call...
return
if self._notebook_decoration_size is None:
# try again later
wx.CallLater( 150, self.edit_properties, edit_widget )
return
self.current_widget = None
self.create_editor(edit_widget)
# this code might not be reached in case of an error
self.current_widget = edit_widget
if edit_widget:
# XXX set status bar
klass = edit_widget.get_prop_value("class", edit_widget.WX_CLASS)
self.heading.SetValue( _('Properties - %s - <%s>:') % (klass, edit_widget.name) )
else:
self.heading.SetValue( _('Properties') )
def create_editor(self, edit_widget):
# fill the frame with a notebook of property editors
if not self.notebook: return # already deleted
self.current_widget_class = edit_widget.__class__
if wx.Platform != "__WXMSW__" :
focus_before = self.FindFocus()
self.notebook.Hide()
# remember the notebook page to be selected
selection = self.notebook.GetSelection()
select_page = self.pagenames[selection] if selection!=-1 else None
# clear notebook pages
#self.notebook.DeleteAllPages() # deletes also the windows on the pages
while self.notebook.PageCount:
print("DELETE PAGE; new widget:", edit_widget)
self.notebook.DeletePage(self.notebook.PageCount-1)
self.pagenames = pagenames = []
self.sizers = []
if not edit_widget: return
current_page = current_sizer = current_pagename = None
property_instance = None
for prop in edit_widget.PROPERTIES:
if prop[0].isupper():
# end previous page
if current_page is not None:
self.end_page(current_page, current_sizer, current_pagename)
current_page = None
# start new page
current_pagename = prop
if prop=="Layout" and not edit_widget._has_layout:continue
if prop=="Events" and edit_widget.events is None: continue
current_page = self.start_page(prop)
current_sizer = wx.BoxSizer(wx.VERTICAL)
self.sizers.append(current_sizer)
self.pagenames.append(prop)
continue
if current_pagename=="Layout" and not edit_widget._has_layout: continue
# a property or None
property_instance_ = edit_widget.properties.get(prop)
if property_instance_ is not None:
property_instance = property_instance_
property_instance.create_editor(current_page, current_sizer)
if current_page is not None:
self.end_page(current_page, current_sizer, current_pagename)
if select_page and select_page in pagenames:
index = pagenames.index(select_page)
self.notebook.SetSelection(index)
else:
self.notebook.SetSelection(0)
self.notebook.Show()
if wx.Platform != "__WXMSW__" and focus_before is common.app_tree:
focus_before.SetFocus()
def start_page(self, name):
# create a ScrolledWindow and a Panel; with only ScrolledWindow, scrolling on gtk 3 does not work
scrolled = wx.ScrolledWindow( self.notebook, name=name)
panel = wx.Panel(scrolled, name="%s properties"%name)
if wx.VERSION[0]<3:
panel.SetBackgroundColour(scrolled.GetBackgroundColour())
return panel
def end_page(self, panel, sizer, header, select=False):
sizer.AddSpacer(30)
panel.SetAutoLayout(1)
panel.SetSizer(sizer)
sizer.Layout()
sizer.Fit(panel)
scrolled = panel.GetParent()
self.notebook.AddPage(scrolled, _(header),select=select)
self._set_page_size(scrolled)
def _set_page_size(self, scrolled):
# set ScrolledWindow and Panel to available size; enable scrolling, if required
# gets available size for notebook pages
ws, hs = self.notebook.GetSize()
ws -= self._notebook_decoration_size[0]
hs -= self._notebook_decoration_size[1]
w_scrollbar = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X) # width a of a scrollbar
panel = [w for w in scrolled.GetChildren() if isinstance(w, wx.Panel)][0]
szr = panel.GetSizer()
if not szr: return
wm, hm = szr.GetMinSize()
if hs<hm:
# best size is smaller than the available height -> enable scrolling
scrolled.SetScrollbars(1, 5, 1, int(math.ceil(hm/5.0)))
panel.SetSize( (ws-w_scrollbar, hm) )
else:
panel.SetSize( (ws, hs) )
def on_notebook_size(self, event):
# calculate available size for pages
if self._notebook_decoration_size:
for scrolled in self.notebook.GetChildren():
self._set_page_size(scrolled)
if event: event.Skip()
def on_panel_size(self, event):
# when the dummy panel receives a size event, we know that things are ready to calculate the notebook pages size
# calculate decoration size from the dummy panel that was added initially
if event.GetSize() != (0,0):
wp, hp = self.notebook.GetPage(0).GetSize() # page/panel size
wn, hn = self.notebook.GetSize() # notebook size
self._notebook_decoration_size = (wn-wp, hn-hp)
self.notebook.DeletePage(0)
else:
# macOS: initial event on creation
event.Skip()
# no longer used for the application; causes crashes on CentOS 7; still used when testing
class wxGladeArtProvider(wx.ArtProvider):
def CreateBitmap(self, artid, client, size):
if wx.Platform == '__WXGTK__' and artid == wx.ART_FOLDER:
return wx.Bitmap(os.path.join(config.icons_path, 'closed_folder.png'))
return wx.NullBitmap
class wxGladePalettePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
common.palette = self # for building the buttons
self.SetBackgroundColour( compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE) )
# load the available code generators
all_widgets = common.init_codegen()
if not config.use_gui: return
self.all_togglebuttons = [] # used by reset_togglebuttons
# for keyboard navigation:
self._id_to_coordinate = {}
self._ids_by_row = []
self._section_to_row = {}
# build the palette for all_widgets
sizer = wx.FlexGridSizer(0, 2, 0, 0)
maxlen = max([len(all_widgets[sect]) for sect in all_widgets]) # the maximum number of buttons in a section
for row, section in enumerate(all_widgets):
self._section_to_row[section] = row
self._ids_by_row.append([])
if section:
label = wx.StaticText(self, -1, "%s:" % section.replace('&', '&&'))
sizer.Add( label, 1, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 2 )
bsizer = wx.BoxSizer()
for col, button in enumerate(all_widgets[section]):
self._ids_by_row[-1].append(button.Id)
self._id_to_coordinate[button.Id] = (row,col)
bsizer.Add(button, flag=wx.ALL, border=1)
if isinstance(button, wx.ToggleButton):
self.all_togglebuttons.append(button)
sizer.Add(bsizer)
self.SetSizer(sizer)
# on platforms other than Windows, we'll set the ToggleButton background colour to indicate the selection
if wx.Platform == "__WXMSW__":
self._highlight_colour = None
else:
self._highlight_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
self.Bind(wx.EVT_CHAR_HOOK, self.on_char)
def reset_togglebuttons(self, keep=None):
# un-toggle all buttons except keep
for button in self.all_togglebuttons:
if keep is not None and button is keep:
if self._highlight_colour:
button.SetBackgroundColour(self._highlight_colour)
continue
if self._highlight_colour and button.GetBackgroundColour()==self._highlight_colour:
button.SetBackgroundColour(wx.NullColour)
if button.GetValue(): button.SetValue(False)
def on_char(self, event):
key = (event.GetKeyCode(), event.GetModifiers()) # modifiers: 1,2,4 for Alt, Ctrl, Shift
if key[1]: return event.Skip()
focused = self.FindFocus()
if not focused or not focused.Id in self._id_to_coordinate:
return event.Skip()
row, col = self._id_to_coordinate[focused.Id]
new_row = new_col = None
if key[0]==wx.WXK_UP:
if row>0: new_row = row-1
elif key[0]==wx.WXK_DOWN:
if row < len(self._ids_by_row)-1: new_row = row+1
elif key[0]==wx.WXK_LEFT:
if col>0: new_col = col-1
elif key[0]==wx.WXK_RIGHT:
if col < len(self._ids_by_row[row])-1: new_col = col+1
elif key[0]==wx.WXK_HOME:
new_col = 0
elif key[0]==wx.WXK_END:
new_col = len(self._ids_by_row[row])-1
elif key[0]==wx.WXK_PAGEUP:
new_row = 0
elif key[0]==wx.WXK_PAGEDOWN:
new_row = len(self._ids_by_row)-1
elif (ord("A") <= key[0] <= ord("Z")) and chr(key[0]) in misc.palette_hotkeys:
section = misc.palette_hotkeys[chr(key[0])]
new_row = self._section_to_row[section]
new_col = 0
else:
return event.Skip()
if new_row is None and new_col is None:
# limits hit
wx.Bell()
else:
if new_col is None: new_col = min(col, len(self._ids_by_row[new_row])-1)
if new_row is None: new_row = row
focus = self.FindWindowById(self._ids_by_row[new_row][new_col])
if focus: focus.SetFocus()
import shell_frame
class ShellFrame(shell_frame.ShellFrame):
def on_btn_assign(self, event):
# insert a variable assignment
widget = misc.focused_widget
if not widget:
event.Skip()
return
path = widget.get_path()
command = 'widget = common.root.find_widget_from_path("%s")\r\n'%path
#self.shell.push(command) # or .write ?
self.shell.write(command)
class wxGladeFrame(wx.Frame):
"Main frame of wxGlade"
def __init__(self):
version = config.version
pos, size, layout = self.init_layout_settings()
wx.Frame.__init__(self, None, -1, "wxGlade v%s" % version, pos=pos, size=size,
style=wx.DEFAULT_FRAME_STYLE, name='MainFrame')
common.main = self
self._set_icon()
self.create_menu()
self.create_toolbar()
style = wx.SP_3D | wx.SP_LIVE_UPDATE
self.splitter1 = wx.SplitterWindow(self, style=style)
self.splitter2 = wx.SplitterWindow(self.splitter1, style=style)
self.palette = wxGladePalettePanel(self.splitter2)
# create the property and the tree frame
common.property_panel = self.property_panel = wxGladePropertyPanel(self.splitter2)
common.root = app = application.Application()
common.app_tree = self.tree = WidgetTree(self.splitter1, app)
self.splitter1.SplitVertically(self.splitter2, self.tree)
self.splitter2.SplitHorizontally(self.palette, self.property_panel)
self.switch_layout(layout, initial=True)
# last visited directory, used on GTK for wxFileDialog
self.cur_dir = config.preferences.open_save_path
# set a drop target for us...
self._droptarget = FileDropTarget(self)
self.SetDropTarget(self._droptarget)
self.create_statusbar() # create statusbar for display of messages
self.Show()
#misc.set_focused_widget(common.root)
self.Bind(wx.EVT_CLOSE, self.on_close)
# disable autosave checks during unittests
if config.testing: return
self.init_autosave()
self.check_autosaved()
self.Bind(wx.EVT_CHAR_HOOK, self.on_char_hook)
if config.debugging:
self.splitter1.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.on_sash)
self.splitter2.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.on_sash)
def on_sash(self, event):
# XXX not yet used, but it could be used to re-format the palette panel
layout = self.layout_settings["layout"]
if layout==0:
size = (self.splitter1.GetSashPosition(), self.splitter2.GetSashPosition())
elif layout==1:
size = (self.splitter2.GetSashPosition(), self.splitter1.GetSashPosition())
elif layout==2:
size = (self.GetClientSize()[0], self.splitter2.GetSashPosition())
def on_char_hook(self, event):
# bound to EVT_CHAR_HOOK
focus = parent = self.FindFocus()
grid = None # will be set if a grid or a grid's child is focused
window_type = None
while parent:
# go up and identify parent: Palette, Property or Tree
if isinstance(parent, wx.grid.Grid):
grid = parent
if parent is self.palette:
window_type = "palette"
elif parent is self.tree:
window_type = "tree"
elif parent is self.property_panel:
window_type = "properties"
if window_type: break
parent = parent.GetParent()
# forward to specific controls / properties? (on wx 2.8 installing EVT_CHAR_HOOK on controls does not work)
if window_type=="properties" and grid and grid.Name!="grid":
# forward event to grid property?
if misc.focused_widget.properties[grid.Name].on_char(event):
return
if window_type=="tree":
if common.app_tree.on_char(event):
return
# global handler
misc.handle_key_event(event, window_type)
def set_widget(self, widget):
# update redo/repeat tools and menus
if not common.history: return
redo_state = (common.history.can_undo, common.history.can_redo, common.history.can_repeat)
if self._previous_redo_state == redo_state:
return
self._menu_undo.Enable(common.history.can_undo)
self._menu_redo.Enable(common.history.can_redo)
self._menu_repeat.Enable(common.history.can_repeat)
if self._tool_redo:
self._tool_undo.Enable(common.history.can_undo)
self._tool_redo.Enable(common.history.can_redo)
self._tool_repeat.Enable(common.history.can_repeat)
self.toolbar.Realize()
self._previous_redo_state = redo_state
# menu and actions #################################################################################################
def create_menu(self):
self._previous_redo_state = None
menu_bar = wx.MenuBar()
compat.wx_ToolTip_SetDelay(1000)
compat.wx_ToolTip_SetAutoPop(30000)
append_menu_item = misc.append_menu_item
# File menu
file_menu = wx.Menu(style=wx.MENU_TEAROFF)
NEW = append_menu_item(file_menu, -1, _("&New\tCtrl+N"), wx.ART_NEW)
misc.bind_menu_item(self, NEW, self.new_app)
item = append_menu_item(file_menu, -1, _("New from &Template...\tShift+Ctrl+N"))
misc.bind_menu_item(self, item, self.new_app_from_template)
OPEN = append_menu_item(file_menu, -1, _("&Open...\tCtrl+O"), wx.ART_FILE_OPEN)
misc.bind_menu_item(self, OPEN, self.open_app)
SAVE = append_menu_item(file_menu, -1, _("&Save\tCtrl+S"), wx.ART_FILE_SAVE)
misc.bind_menu_item(self, SAVE, self.save_app)
SAVE_AS = append_menu_item(file_menu, -1, _("Save As..."), wx.ART_FILE_SAVE_AS)
misc.bind_menu_item(self, SAVE_AS, self.save_app_as)
item = append_menu_item(file_menu, -1, _("Save As Template..."))
misc.bind_menu_item(self, item, self.save_app_as_template)
file_menu.AppendSeparator() # ----------------------------------------------------------------------------------
GENERATE_CODE = append_menu_item(file_menu, -1, _("&Generate Code\tCtrl+G"), wx.ART_EXECUTABLE_FILE)
misc.bind_menu_item(self, GENERATE_CODE, lambda: common.root.generate_code())
file_menu.AppendSeparator() # ----------------------------------------------------------------------------------
item = append_menu_item(file_menu, -1, _("&Import from XRC..."))
misc.bind_menu_item(self, item, self.import_xrc)
file_menu.AppendSeparator() # ----------------------------------------------------------------------------------
EXIT = append_menu_item(file_menu, -1, _('E&xit\tCtrl+Q'), wx.ART_QUIT)
misc.bind_menu_item(self, EXIT, self.Close)
menu_bar.Append(file_menu, _("&File"))
# Edit menu ====================================================================================================
edit_menu = wx.Menu(style=wx.MENU_TEAROFF)
# these menu items will be updated
self._menu_undo = item = append_menu_item(edit_menu, -1, _('Un-do\tCtrl+Z'),
helpString="Un-do the last property modification")
misc.bind_menu_item(self, item, lambda: common.history.undo(misc.focused_widget))
self._menu_redo = item = append_menu_item(edit_menu, -1, _('Re-do\tCtrl+Y'),
helpString="Re-do the last un-done property modification")
misc.bind_menu_item(self, item, lambda: common.history.redo(misc.focused_widget))
self._menu_repeat = item = append_menu_item(edit_menu, -1, _('Repeat\tCtrl-R'),
helpString="Repeat the last property modifications on another widget (multiple modifications, if applicable)")
misc.bind_menu_item(self, item, lambda: common.history.repeat(misc.focused_widget))
edit_menu.AppendSeparator() # ----------------------------------------------------------------------------------
item = append_menu_item(edit_menu, -1, _('Template Manager...'))
misc.bind_menu_item(self, item, self.manage_templates)
item = append_menu_item(edit_menu, wx.ID_PREFERENCES, _('Preferences...'), "prefs.png")
misc.bind_menu_item(self, item, self.edit_preferences)
menu_bar.Append(edit_menu, _("&Edit"))
# Windows menu: layout and focus ===============================================================================
view_menu = wx.Menu(style=wx.MENU_TEAROFF)
i = append_menu_item(view_menu, -1, _("Layout &1: Tree\tAlt-1"), "../layout1.png")
misc.bind_menu_item(self, i, self.switch_layout, 0)
i = append_menu_item(view_menu, -1, _("Layout &2: Properties\tAlt-2"), "../layout2.png")
misc.bind_menu_item(self, i, self.switch_layout, 1)
i = append_menu_item(view_menu, -1, _("Layout &3: Narrow\tAlt-3"), "../layout3.png")
misc.bind_menu_item(self, i, self.switch_layout, 2)
view_menu.AppendSeparator()
i = append_menu_item(view_menu, -1, _("Focus &Tree\tF2"))
misc.bind_menu_item(self, i, self.show_tree)
i = append_menu_item(view_menu, -1, _("Focus &Properties\tF3"))
misc.bind_menu_item(self, i, self.show_props_window )
i = append_menu_item(view_menu, -1, _("Focus Pa&lette\tF4"))
misc.bind_menu_item(self, i, self.show_palette )
# submenu focus sections >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
view_props_menu = wx.Menu()
# randomly select set of shortcuts to be displayed:
if int(math.ceil(time.time())) % 2:
shortcuts = ["F8", "F9", "F10", "F11", "F12"]
else:
shortcuts = ["Ctrl-M", "Ctrl-L", "Ctrl-W", "Ctrl-E", "Ctrl-D"]
for sc, section in zip(shortcuts, ("Common", "Layout", "Widget", "Events", "Code")):
i = append_menu_item(view_props_menu, -1, _("Focus &%s\t%s"%(section, sc)))
misc.bind_menu_item(self, i, self.show_props_window, section)
view_menu.AppendSubMenu(view_props_menu, _("Focus Properties &Section"))
view_menu.AppendSeparator() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
i = append_menu_item(view_menu, -1, _("Show/Hide &Design\tF6"))
misc.bind_menu_item(self, i, self.show_design_window)
self._m_pin_design_window = i = append_menu_item(view_menu, -1, _("&Pin &Design\tCtrl-P"), kind=wx.ITEM_CHECK)
misc.bind_menu_item(self, i, self.pin_design_window)
view_menu.AppendSeparator() # ----------------------------------------------------------------------------------
item = append_menu_item(view_menu, wx.ID_REFRESH, _("&Refresh Preview\tF5"), "refresh.png")
misc.bind_menu_item(self, item, self.preview)
menu_bar.Append(view_menu, _("&Windows"))
# Help menu ====================================================================================================
help_menu = wx.Menu(style=wx.MENU_TEAROFF)
MANUAL = append_menu_item(help_menu, -1, _('Manual\tF1'), wx.ART_HELP_BOOK)
misc.bind_menu_item(self, MANUAL, self.show_manual)
#item = append_menu_item(help_menu, -1, _('Tutorial'))
#misc.bind_menu_item(self, item, self.show_tutorial)
help_menu.AppendSeparator() # ----------------------------------------------------------------------------------
i = append_menu_item(help_menu, -1, _('Mailing list'))
misc.bind_menu_item(self, i, self.show_mailing_list)
i = append_menu_item(help_menu, -1, _('Bug tracker'))
misc.bind_menu_item(self, i, self.show_bug_tracker)
i = append_menu_item(help_menu, -1, _('Releases'))
misc.bind_menu_item(self, i, self.show_releases)
help_menu.AppendSeparator() # ----------------------------------------------------------------------------------
if config.debugging:
i = append_menu_item(help_menu, -1, 'Shell\tF7')
misc.bind_menu_item(self, i, self.create_shell_window)
help_menu.AppendSeparator()
item = append_menu_item(help_menu, wx.ID_ABOUT, _('About'), wx.ART_INFORMATION)
misc.bind_menu_item(self, item, self.show_about_box)
menu_bar.Append(help_menu, _('&Help'))
self.SetMenuBar(menu_bar)
# Mac tweaks...
if wx.Platform == "__WXMAC__":
if compat.IS_PHOENIX:
wx.PyApp.SetMacAboutMenuItemId(wx.ID_ABOUT)
wx.PyApp.SetMacPreferencesMenuItemId(wx.ID_PREFERENCES)
wx.PyApp.SetMacExitMenuItemId(wx.ID_EXIT)
wx.PyApp.SetMacHelpMenuTitleName(_('&Help'))
else:
wx.App_SetMacAboutMenuItemId(wx.ID_ABOUT)
wx.App_SetMacPreferencesMenuItemId(wx.ID_PREFERENCES)
wx.App_SetMacExitMenuItemId(wx.ID_EXIT)
wx.App_SetMacHelpMenuTitleName(_('&Help'))
# file history support
num_entries = config.preferences.number_history
self.file_history = wx.FileHistory(num_entries)
self.file_history.UseMenu(file_menu)
files = common.load_file_history()
files.reverse()
for path in files:
self.file_history.AddFileToHistory(path.strip())
self.Bind(wx.EVT_MENU_RANGE, self.open_from_history, id=wx.ID_FILE1, id2=wx.ID_FILE1+num_entries-1)
def _add_label_tool(self, tb, size, id, label, bmp, itemtype, msg, msg_long=None):
ADD = tb.AddLabelTool if compat.IS_CLASSIC else tb.AddTool
if isinstance(bmp, str) and not bmp.startswith("wxART_"):
bmp = wx.Bitmap( os.path.join(config.icons_path, bmp) )
else:
# a wx.ART_... constant
bmp = wx.ArtProvider.GetBitmap(bmp, wx.ART_TOOLBAR, size)
return ADD(-1, _(label), bmp, wx.NullBitmap, itemtype, _(msg), _(msg_long or msg))
def create_toolbar(self):
# new, open, save, generate, add, delete, re-do, Layout 1, 2, 3, pin, help
# insert slot/page?
# Layout: Alt + 1,2,3
self.toolbar = tb = wx.ToolBar(self, -1)
self.SetToolBar(tb)
size = (21,21)
add = functools.partial(self._add_label_tool, tb, size)
t = add( wx.ID_NEW, "New", wx.ART_NEW, wx.ITEM_NORMAL, "Start a new file (Ctrl+N)")
self.Bind(wx.EVT_TOOL, self.new_app, t)
t = add( wx.ID_OPEN, "Open", wx.ART_FILE_OPEN, wx.ITEM_NORMAL, "Open an existing file (Ctrl+O)")
self.Bind(wx.EVT_TOOL, self.open_app, t)
t = add( wx.ID_SAVE, "Save", wx.ART_FILE_SAVE, wx.ITEM_NORMAL, "Save file (Ctrl+S)")
self.Bind(wx.EVT_TOOL, self.save_app, t)
if config.debugging and hasattr(wx, "ART_PLUS"):
t = add( -1, "Add", wx.ART_PLUS, wx.ITEM_NORMAL, "Add widget (Ctrl+A)")
t.Enable(False)
# XXX switch between wx.ART_DELETE for filled slots and wx.ART_MINUS for empty slots
t = add( -1, "Remove", wx.ART_MINUS, wx.ITEM_NORMAL, "Add widget (Ctrl+A)")
t.Enable(False)
tb.AddSeparator()
self._tool_undo = t = add( wx.ID_UNDO, "Un-do", wx.ART_UNDO, wx.ITEM_NORMAL, "Un-do (Ctrl+Z)" )
t.Enable(False)
self.Bind(wx.EVT_TOOL, lambda evt: common.history.undo(misc.focused_widget), t)
self._tool_redo = t = add( wx.ID_REDO, "Re-do", wx.ART_REDO, wx.ITEM_NORMAL, "Re-do (Ctrl+Y)" )
t.Enable(False)
self.Bind(wx.EVT_TOOL, lambda evt: common.history.redo(misc.focused_widget), t)
self._tool_repeat = t = add( -1, "Repeat", wx.ART_REDO, wx.ITEM_NORMAL, "Repeat (Ctrl+R)" )
t.Enable(False)
self.Bind(wx.EVT_TOOL, lambda evt: common.history.repeat(misc.focused_widget), t)
tb.AddSeparator()
t = add(-1, "Generate Code", wx.ART_EXECUTABLE_FILE, wx.ITEM_NORMAL, "Generate Code (Ctrl+G)" )
self.Bind(wx.EVT_TOOL, lambda event: common.root.generate_code(), t)
tb.AddSeparator()
t1 = add(-1, "Layout 1", "layout1.png", wx.ITEM_RADIO, "Switch layout: Tree",
"Switch layout: Palette and Properties left, Tree right")
self.Bind(wx.EVT_TOOL, lambda event: self.switch_layout(0), t1)
t2 = add(-1, "Layout 2", "layout2.png", wx.ITEM_RADIO,"Switch layout: Properties",
"Switch layout: Palette and Tree top, Properties bottom")
self.Bind(wx.EVT_TOOL, lambda event: self.switch_layout(1), t2)
t3 = add(-1, "Layout 3", "layout3.png", wx.ITEM_RADIO, "Switch layout: narrow",
"Switch layout: Palette, Tree and Properties on top of each other")
self.Bind(wx.EVT_TOOL, lambda event: self.switch_layout(2), t3)
self._layout_tools = [t1,t2,t3]
tb.AddSeparator()
t = add(-1, "Pin Design Window", "pin_design.png", wx.ITEM_CHECK, "Pin Design Window",
"Pin Design Window to stay on top")
self.Bind(wx.EVT_TOOL, lambda event: self.pin_design_window(), t)
self._t_pin_design_window = t
tb.AddSeparator()
t = add(wx.ID_HELP, "Help", wx.ART_HELP_BOOK, wx.ITEM_NORMAL, "Show manual (F1)")
self.Bind(wx.EVT_TOOL, self.show_manual, t)
self.toolbar.Realize()
def init_autosave(self):
# ALB 2004-10-15, autosave support...
self.autosave_timer = None
if not config.preferences.autosave: return
self.autosave_timer = wx.Timer(self, -1)
self.Bind(wx.EVT_TIMER, self.on_autosave_timer, self.autosave_timer)
self.autosave_timer.Start( int(config.preferences.autosave_delay) * 1000 )
def on_autosave_timer(self, event):
res = common.autosave_current()
if res == 2:
self.user_message(_("Auto saving... done"))
elif not res:
self.autosave_timer.Stop()
config.preferences.autosave = False
logging.info(_('Disable autosave function permanently'))
wx.MessageBox(
_('The autosave function failed. It has been disabled\n'
'permanently due to this error. Use the preferences\n'
'dialog to re-enable this functionality.\n'
'The details have been written to the wxGlade log file\n\n'
'The log file is: %s' % config.log_file ),
_('Autosave Failed'), wx.OK | wx.CENTRE | wx.ICON_ERROR )
def check_autosaved(self):
if not common.check_autosaved(None): return
res = wx.MessageBox(
_('There seems to be auto saved data from last wxGlade session: do you want to restore it?'),
_('Auto save detected'), style=wx.ICON_QUESTION | wx.YES_NO)
if res == wx.YES:
filename = common.get_name_for_autosave()
if self._open_app(filename, add_to_history=False):
self.cur_dir = os.path.dirname(filename)
common.root.saved = False
common.root.filename = None
self.user_message(_('Auto save loaded'))
common.remove_autosaved()
def edit_preferences(self):
dialog = preferencesdialog.wxGladePreferences(config.preferences)
if dialog.ShowModal() == wx.ID_OK:
wx.MessageBox( _('Changes will take effect after wxGlade is restarted'),
_('Preferences saved'), wx.OK|wx.CENTRE|wx.ICON_INFORMATION )
dialog.set_preferences()
dialog.Destroy()
def _get_toplevel(self):
# return the toplevel for a preview or design window
if misc.focused_widget and not isinstance(misc.focused_widget, application.Application):
# a widget is selected, find the toplevel window for it
return misc.focused_widget.toplevel_parent
# find main toplevel window
return common.root._get_top_window()
def preview(self):
"""Generate preview of the current loaded project.
A preview can be triggered by keyboard shortcut or by pressing the preview button.
The preview can be triggered for all selected widgets.
This doesn't mean that the widget is opened for editing."""
toplevel = self._get_toplevel()
if toplevel is not None:
toplevel.preview(refresh=True)
def show_palette(self):
if self.IsIconized(): self.Iconize(False)
self.palette.SetFocus()
def show_tree(self):
if self.IsIconized(): self.Iconize(False)
common.app_tree.SetFocus()
def show_props_window(self, section=None):
# XXX implement: if a section is active already, then go to first property of the page
if not self.property_panel.notebook: return
if self.IsIconized(): self.Iconize(False)
self.property_panel.pagenames
if not section:
self.property_panel.notebook.SetFocus()
else:
if not section in self.property_panel.pagenames:
return
i = self.property_panel.pagenames.index(section)
if self.property_panel.notebook.GetSelection() != i:
self.property_panel.notebook.ChangeSelection(i)
else:
self.property_panel.notebook.SetFocus()
# try to set the focus if the widget has changed; this is not yet implemented for many property types
widget = self.property_panel.current_widget
if ( widget and (widget is not np.current_property.owner) and
section in widget.PROPERTIES ):
i_p = widget.PROPERTIES.index(section) + 1
if i_p<len(widget.PROPERTIES):
prop = widget.properties.get(widget.PROPERTIES[i_p])
if prop: prop.set_focus()
self.Raise()
def show_design_window(self):
toplevel = self._get_toplevel()
if not toplevel: return
if toplevel.widget:
focus = toplevel.widget.FindFocus()
focused = focus and focus.GetTopLevelParent() is toplevel.widget.GetTopLevelParent()
if toplevel.widget and toplevel.widget.IsShownOnScreen() and not focused:
# just raise it
if toplevel.widget.GetTopLevelParent().IsIconized():
toplevel.widget.GetTopLevelParent().Iconize(False)
toplevel.widget.GetTopLevelParent().Raise()
return
# open or close
common.app_tree.show_toplevel(None, editor=toplevel)
def pin_design_window(self):
common.pin_design_window = not common.pin_design_window
if common.pin_design_window != self._t_pin_design_window.IsToggled():
self._t_pin_design_window.Toggle()
self.toolbar.Realize()
self._m_pin_design_window.Check(common.pin_design_window)
toplevel = self._get_toplevel()
if not toplevel or not toplevel.widget: return
frame = toplevel.widget.GetTopLevelParent()
if not isinstance(frame, wx.Frame): return
style = frame.GetWindowStyle()
if common.pin_design_window:
frame.SetWindowStyle( style | wx.STAY_ON_TOP)
elif style & wx.STAY_ON_TOP:
frame.ToggleWindowStyle(wx.STAY_ON_TOP)
if wx.Platform=='__WXMSW__':
frame.Iconize(True)
frame.Iconize(False)
else:
toplevel.widget.Raise()
def create_shell_window(self):
common.shell = ShellFrame(None)
if misc.focused_widget: common.shell.txt_path.SetValue( misc.focused_widget.get_path() )
common.shell.Show()
# status bar for message display ###################################################################################
def create_statusbar(self):
self.CreateStatusBar(1)
# ALB 2004-10-15 statusbar timer: delete user message after some time
self.clear_sb_timer = wx.Timer(self, -1)
self.Bind(wx.EVT_TIMER, self.on_clear_sb_timer, self.clear_sb_timer)
def user_message(self, msg):
# display a message, but clear it after a few seconds again
sb = self.GetStatusBar()
if sb:
sb.SetStatusText(msg)
if msg:
self.clear_sb_timer.Start(5000, True)
def on_clear_sb_timer(self, event):
sb = self.GetStatusBar()
if sb: sb.SetStatusText("")
####################################################################################################################
def ask_save(self):
"""checks whether the current app has changed and needs to be saved:
if so, prompts the user;
returns False if the operation has been cancelled"""
if not common.root.saved:
ok = wx.MessageBox(_("Save changes to the current app?"),
_("Confirm"), wx.YES_NO|wx.CANCEL|wx.CENTRE|wx.ICON_QUESTION)
if ok == wx.YES:
self.save_app()
return ok != wx.CANCEL
return True
def new_app(self, event=None):
"creates a new wxGlade project"
if not self.ask_save(): return
common.root.clear()
common.root.new()
common.root.filename = None
self.user_message("")
misc.rebuild_tree(common.root)
common.root.saved = True
common.remove_autosaved()
if config.preferences.autosave and self.autosave_timer is not None:
self.autosave_timer.Start()
def new_app_from_template(self):
"creates a new wxGlade project from an existing template file"
if not self.ask_save(): return
infile = template.select_template()
if infile:
self._open_app(infile, add_to_history=False)
common.root.template_data = None
def open_app(self, event=None):
"""loads a wxGlade project from an xml file
NOTE: this is very slow and needs optimisation efforts
NOTE2: the note above should not be True anymore :) """
if not self.ask_save():
return
default_path = os.path.dirname(common.root.filename or "") or self.cur_dir
infile = wx.FileSelector(_("Open file"),
wildcard="wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt)|*.wgt|"
"XML files (*.xml)|*.xml|All files|*",
flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=default_path)
if not infile: return
self._open(infile)
def open_from_history(self, event):
if not self.ask_save():
return
pos = event.GetId() - wx.ID_FILE1
filename = self.file_history.GetHistoryFile(pos)
if not os.path.exists(filename):
wx.MessageBox( _("The file %s doesn't exist.") % filename,
_('Information'), style=wx.CENTER | wx.ICON_INFORMATION | wx.OK )
self.file_history.RemoveFileFromHistory(pos)
common.remove_autosaved(filename)
return
self._open(filename)
def _open(self, filename):
# called by open_app and open_from_history
if common.check_autosaved(filename):
res = wx.MessageBox( _('There seems to be auto saved data for this file: do you want to restore it?'),
_('Auto save detected'), style=wx.ICON_QUESTION | wx.YES_NO )
if res == wx.YES:
common.restore_from_autosaved(filename)
else:
common.remove_autosaved(filename)
else:
common.remove_autosaved(filename)
path = position = None
if filename == common.root.filename:
# if we are re-loading the file, store path and position
if misc.focused_widget:
path = misc.focused_widget.get_path()
if misc.focused_widget.widget is not None and not misc.focused_widget.IS_ROOT:
toplevel = misc.get_toplevel_parent(misc.focused_widget.widget)
if toplevel: position = toplevel.GetPosition()
self._open_app(filename)
self.cur_dir = os.path.dirname(filename)
if not path: return
editor = common.root.find_widget_from_path(path)
if not editor: return
misc.set_focused_widget(editor)
if editor is common.root: return
editor.toplevel_parent.create()
common.app_tree.ExpandAllChildren(editor.item)
if not position or not editor.widget: return
misc.get_toplevel_parent(editor.widget).SetPosition(position)
def _open_app(self, filename, use_progress_dialog=True, add_to_history=True):
"Load a new wxGlade project"
error_msg = None
infile = None
start = time.time()
common.root.clear()
common.root.init()
common.app_tree.DeleteChildren(common.root.item)
common.app_tree.auto_expand = False # disable auto-expansion of nodes
try:
try:
logging.info( _('Read wxGlade project from file "%s"'), filename )
input_file_version = None
if not isinstance(filename, list):
common.root.filename = filename
# decoding will be done automatically by the SAX XML library
if compat.PYTHON2:
infile = open(filename)
else:
infile = open(filename, "r", encoding="UTF8")
if hasattr(infile, "seek"):
# try to read file version number from the first few lines
import re
version_re = re.compile(r"<!-- generated by wxGlade (\d+)\.(\d+)\.(\d+)(\S*)\s*")
for n in range(3):
match = version_re.match( infile.readline() )
if match:
major, minor, sub, extension = match.groups()
input_file_version = (int(major), int(minor), int(sub), extension)
break
infile.seek(0)
else:
common.root.filename = None
if use_progress_dialog and config.preferences.show_progress:
p = ProgressXmlWidgetBuilder(filename, input_file_version, input_file=infile)
else:
p = XmlWidgetBuilder(filename, input_file_version)
if infile is not None:
p.parse(infile)
else:
p.parse_string(filename)
filename = None
except (EnvironmentError, SAXParseException, XmlParsingError) as msg:
if config.debugging: raise
if infile is not None:
error_msg = _("Error loading file %s:\n%s") % (misc.wxstr(filename), misc.wxstr(msg))
else:
error_msg = _("Error loading from a file-like object:\n%s") % misc.wxstr(msg)
except Exception as inst:
if config.debugging: raise
if filename and not isinstance(filename, list):
fn = os.path.basename(filename).encode('ascii','replace')
msg = _('loading file "%s"') % fn
else:
msg = _('loading from a file-like object')
bugdialog.Show(msg, inst)
finally:
if infile and filename:
infile.close()
if error_msg:
common.root.clear()
common.root.new()
common.root.saved = True
if common.history: common.history.reset()
common.app_tree.auto_expand = True # re-enable auto-expansion of nodes
wx.MessageBox(error_msg, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)
return False
misc.rebuild_tree(common.root, freeze=True)
common.app_tree.auto_expand = True # re-enable auto-expansion of nodes
common.app_tree.Expand(common.root.item)
if common.root.is_template:
logging.info(_("Template loaded"))
common.root.template_data = template.Template(filename)
common.root.filename = None
end = time.time()
logging.info(_('Loading time: %.5f'), end - start)
common.root.saved = True
if common.history: common.history.reset()
#common.property_panel.Raise()
if hasattr(self, 'file_history') and filename is not None and add_to_history and \
(not common.root.is_template):
self.file_history.AddFileToHistory(misc.wxstr(filename))
if config.preferences.autosave and self.autosave_timer is not None:
self.autosave_timer.Start()
duration = end - start
if filename:
self.user_message( _("Loaded %s in %.2f seconds") % (misc.wxstr(os.path.basename(filename)), duration) )
else:
self.user_message( _("Loaded in %.2f seconds") % duration )
return True
def save_app(self, event=None):
"saves a wxGlade project onto an xml file"
np.flush_current_property()
if not common.root.filename or common.root.is_template:
self.save_app_as()
else:
# check whether we are saving a template
ext = os.path.splitext(common.root.filename)[1].lower()
if ext == ".wgt":
common.root.is_template = True
self._save_app(common.root.filename)
def _save_app(self, filename):
try:
obuffer = []
common.root.write(obuffer)
common.save_file(filename, obuffer, 'wxg')
except EnvironmentError as inst:
if config.debugging: raise
common.root.saved = False
bugdialog.ShowEnvironmentError(_('Saving this project failed'), inst)
except Exception as inst:
if config.debugging: raise
common.root.saved = False
fn = os.path.basename(filename).encode('ascii', 'replace')
bugdialog.Show(_('Save File "%s"') % fn, inst)
else:
common.root.saved = True
common.remove_autosaved()
if config.preferences.autosave and self.autosave_timer is not None:
self.autosave_timer.Start()
self.user_message( _("Saved %s") % os.path.basename(filename) )
def save_app_as(self):
"saves a wxGlade project onto an xml file chosen by the user"
if common.root.filename:
default_path, default_filename = os.path.split(common.root.filename)
else:
default_path, default_filename = self.cur_dir, "wxglade.wxg"
fn = wx.FileSelector( _("Save project as..."),
wildcard="wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt) |*.wgt|"
"XML files (*.xml)|*.xml|All files|*",
flags=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
default_path=default_path,
default_filename=default_filename)
if not fn: return
# check for file extension and add default extension if missing
ext = os.path.splitext(fn)[1].lower()
if not ext:
fn = "%s.wxg" % fn
common.root.filename = fn
#remove the template flag so we can save the file.
common.root.properties["is_template"].set(False)
self.save_app()
self.cur_dir = os.path.dirname(fn)
self.file_history.AddFileToHistory(fn)
def save_app_as_template(self):
"save a wxGlade project as a template"
data = getattr(common.root, 'template_data', None)
outfile, data = template.save_template(data)
if outfile:
common.root.properties["is_template"].set(True)
common.root.template_data = data
self._save_app(outfile)
def on_close(self, event):
if not event.CanVeto():
event.Skip()
return
if self.ask_save():
# close application
# first, let's see if we have to save the geometry...
prefs = config.preferences
if prefs.remember_geometry:
self._store_layout()
prefs.set_dict("layout", self.layout_settings)
prefs.changed = True
common.root.clear()
common.root.new()
try:
common.save_preferences()
except Exception as e:
wx.MessageBox( _('Error saving preferences:\n%s') % e,
_('Error'), wx.OK|wx.CENTRE|wx.ICON_ERROR )
self.Destroy()
common.remove_autosaved()
wx.CallAfter(wx.GetApp().ExitMainLoop)
elif event.CanVeto():
event.Veto()
def show_about_box(self):
"show the about dialog; see: about.wxGladeAboutBox"
about_box = about.wxGladeAboutBox()
about_box.ShowModal()
about_box.Destroy()
def show_manual(self, event=None):
"Show the wxGlade user manual"
self._show_html(config.manual_file)
def show_bug_tracker(self):
self._show_html("https://github.com/wxGlade/wxGlade/issues")
def show_mailing_list(self):
self._show_html("https://sourceforge.net/p/wxglade/mailman/wxglade-general/")
def show_releases(self):
self._show_html("https://github.com/wxGlade/wxGlade/releases")
def _show_html(self, html_file):
"Open browser and show an HTML documentation"
if wx.Platform == "__WXMAC__":
os.system(r'open -a Safari.app "%s"' % html_file)
else:
import webbrowser, threading
# ALB 2004-08-15: why did this block the program????? (at least on linux - GTK)
def go():
webbrowser.open_new(html_file)
t = threading.Thread(target=go)
t.setDaemon(True)
t.start()
def show_and_raise(self):
self.property_panel.Show() # self.GetMenuBar().IsChecked(self.PROPS_ID))
self.tree_panel.Show() # self.GetMenuBar().IsChecked(self.TREE_ID))
self.property_panel.Raise()
self.tree_panel.Raise()
self.Raise()
def hide_all(self):
self.tree_panel.Hide()
self.property_panel.Hide()
def import_xrc(self, infilename=None, ask_save=True):
import xrc2wxg
if ask_save and not self.ask_save():
return
if not infilename:
infilename = wx.FileSelector( _("Import file"), wildcard="XRC files (*.xrc)" "|*.xrc|All files|*",
flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=self.cur_dir)
if infilename:
ibuffer = []
try:
xrc2wxg.convert(infilename, ibuffer)
# Convert UTF-8 returned by xrc2wxg.convert() to Unicode
tmp = b"".join(ibuffer).decode('UTF-8')
ibuffer = ['%s\n'%line for line in tmp.split('\n')]
self._open_app(ibuffer)
common.root.saved = False
except Exception as inst:
fn = os.path.basename(infilename).encode('ascii', 'replace')
bugdialog.Show(_('Import File "%s"') % fn, inst)
def manage_templates(self):
to_edit = template.manage_templates()
if to_edit is not None and self.ask_save():
# edit the template
# TODO, you still need to save it manually...
self._open_app(to_edit, add_to_history=False)
wx.MessageBox( _("To save the changes to the template, edit the GUI as usual,\n"
"and then click File->Save As Template..."),
_("Information"), style=wx.OK|wx.ICON_INFORMATION )
####################################################################################################################
# user interface helpers
def _set_icon(self):
icon = compat.wx_EmptyIcon()
bmp = wx.Bitmap( os.path.join(config.icons_path, "icon.png") )
icon.CopyFromBitmap(bmp)
self.SetIcon(icon)
def init_layout_settings(self):
# either load from file or init with defaults
display_area = wx.Display(0).ClientArea
default_pos = display_area.TopLeft
height = display_area.height
width = 800
default_size = (width,height)
self.layout_settings = {}
self.layout_settings["layout"] = 0
self.layout_settings["sash_positions"] = [[400, 380 ], # 0: palette and properties left; tree right
[height//2,400 ], # 1: palette and tree top; properties bottom
[2*height//3,height//3] ] # 2: all on top of each other
self.layout_settings["widths"] = [width,500] # for layouts 0/1 and 2
self.layout_settings["height"] = height
self.layout_settings["x"], self.layout_settings["y"] = default_pos
if not config.preferences.remember_geometry:
return default_pos, default_size, 0
# read from preferences
try:
layout = config.preferences.get_int("layout", "layout")
x = config.preferences.get_int("layout", "x")
y = config.preferences.get_int("layout", "y")
widths = [config.preferences.get_int("layout", "widths_l0"),
config.preferences.get_int("layout", "widths_l1")]
width = widths[0] if layout<2 else widths[1]
height = config.preferences.get_int("layout", "height")
sash_positions = [[config.preferences.get_int("layout", "sash_positions_l0_l0"),
config.preferences.get_int("layout", "sash_positions_l0_l1")],
[config.preferences.get_int("layout", "sash_positions_l1_l0"),
config.preferences.get_int("layout", "sash_positions_l1_l1")],
[config.preferences.get_int("layout", "sash_positions_l2_l0"),
config.preferences.get_int("layout", "sash_positions_l2_l1")]]
except:
return default_pos, default_size, 0
if layout<0 or layout>2 or not self._check_geometry(x, y, width, height):
return default_pos, default_size, 0
self.layout_settings["height"] = height
self.layout_settings["sash_positions"] = sash_positions
self.layout_settings["widths"] = widths
return (x,y), (widths[0],height), layout # return widths[0] as 0 is the initial setting
def switch_layout(self, new_layout, initial=False):
if new_layout != self.layout_settings["layout"]:
# set the splitters
if not initial: self._store_layout()
self.splitter2.Unsplit()
self.splitter1.Unsplit()
if new_layout==0:
self.property_panel.Reparent(self.splitter2)
self.palette.Reparent(self.splitter2)
self.tree.Reparent(self.splitter1)
self.splitter1.SplitVertically(self.splitter2, self.tree)
self.splitter2.SplitHorizontally(self.palette, self.property_panel)
elif new_layout==1:
self.property_panel.Reparent(self.splitter1)
self.palette.Reparent(self.splitter2)
self.tree.Reparent(self.splitter2)
self.splitter1.SplitHorizontally(self.splitter2, self.property_panel)
self.splitter2.SplitVertically(self.palette, self.tree)
elif new_layout==2:
self.property_panel.Reparent(self.splitter1)
self.palette.Reparent(self.splitter2)
self.tree.Reparent(self.splitter2)
self.splitter1.SplitHorizontally(self.splitter2, self.property_panel)
self.splitter2.SplitHorizontally(self.palette, self.tree)
if self.layout_settings["layout"] in (0,1) and new_layout==2:
self.SetSize( (self.layout_settings["widths"][1], self.GetSize()[1]) )
elif self.layout_settings["layout"]==2 and new_layout in (0,1):
self.SetSize( (self.layout_settings["widths"][0], self.GetSize()[1]) )
self.layout_settings["layout"] = new_layout
# display in toolbar
t = self._layout_tools[new_layout]
if not t.IsToggled(): t.Toggle()
self.toolbar.Realize()
# set splitter sash positions
if new_layout==0:
self.splitter2.SetMinimumPaneSize(1)
self.splitter2.SetSashGravity(0)
self.splitter1.SetMinimumPaneSize(2)
self.splitter1.SetSashGravity(0)
elif new_layout==1:
self.splitter2.SetMinimumPaneSize(1)
self.splitter2.SetSashGravity(0)
self.splitter1.SetMinimumPaneSize(2)
self.splitter1.SetSashGravity(0.5)
elif new_layout==2:
self.splitter2.SetMinimumPaneSize(1)
self.splitter2.SetSashGravity(0)
self.splitter1.SetMinimumPaneSize(2)
self.splitter1.SetSashGravity(0.5)
positions = self.layout_settings["sash_positions"][new_layout]
self.splitter1.SetSashPosition( positions[0] )
self.splitter2.SetSashPosition( positions[1] )
def _store_layout(self):
# store position, size and splitter sash positions
self.layout_settings["x"], self.layout_settings["y"] = self.GetPosition()
layout = self.layout_settings["layout"]
self.layout_settings["sash_positions"][layout] = [self.splitter1.GetSashPosition(),
self.splitter2.GetSashPosition()]
width, height = self.GetSize()
if layout in (0,1):
self.layout_settings["widths"][0] = width
else:
self.layout_settings["widths"][1] = width
self.layout_settings["height"] = height
def _check_geometry(self, x,y,width,height):
# check whether a significant part would be visible
# also, at least a part of the top edge needs to be visible
if width<150 or height<150: return False
# check top line
top_line = wx.Rect(x,y,width,1)
min_visible = int(round(width/3))
top_line_OK = False
for d in range(wx.Display.GetCount()):
display = wx.Display(d)
client_area = display.ClientArea
if not client_area.width or not client_area.height:
# the display info is broken on some installations
continue
intersection = client_area.Intersect(top_line)
if intersection.width>=min_visible:
top_line_OK = True
break
if not top_line_OK: return False
# check rectangle
geometry = wx.Rect(x,y,width,height)
for d in range(wx.Display.GetCount()):
display = wx.Display(d)
client_area = display.ClientArea
if not client_area.width or not client_area.height:
# the display info is broken on some installations
continue
intersection = client_area.Intersect(geometry)
if intersection.width>150 and intersection.height>150 or geometry.width==-1 or geometry.height==-1:
return True
return False
class wxGlade(wx.App):
"""wxGlade application class
_exception_orig: Reference to original implementation of logging.exception()"""
def OnInit(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# replace text based exception handler by a graphical exception dialog
sys.excepthook = self.graphical_exception_handler
# use graphical implementation to show caught exceptions
self._exception_orig = logging.exception # Reference to original implementation of logging.exception()
logging.exception = self.exception
# needed for wx >= 2.3.4 to disable wxPyAssertionError exceptions
if not config.debugging:
self.SetAssertMode(0)
common.init_preferences()
self.locale = wx.Locale(wx.LANGUAGE_DEFAULT) # avoid PyAssertionErrors
#compat.wx_ArtProviderPush(wxGladeArtProvider())
frame = wxGladeFrame()
self.SetTopWindow(frame)
self.SetExitOnFrameDelete(True)
self.init_idle()
if config.inform_screen_reader:
message = ("It seems you have a screen reader software installed.\n"
"Please be aware that there are some options to improve wxGlade accessibility\n"
"with screen readers.\n"
"See menu Edit -> Preferences -> Accessibility.")
wx.CallLater(1000, wx.MessageBox, message, "Accessibility Info")
return True
def OnExit(self):
"Restore original exception handler and logging.exception() on exit"
sys.excepthook = sys.__excepthook__
logging.exception = self._exception_orig
return 0
def init_idle(self):
if wx.Platform == "__WXMAC__" and compat.IS_CLASSIC:
# it seems that EVT_IDLE makes wx.CallAfter stall from time to time, so we use a timer
wx.CallLater(200, self.OnIdle)
else:
self.Bind(wx.EVT_IDLE, self.OnIdle)
def OnIdle(self, event=None):
"Idle tasks - currently show error messages only; see: show_msgdialog()"
try:
self.show_msgdialog()
finally:
if wx.Platform == "__WXMAC__" and compat.IS_CLASSIC:
wx.CallLater(200, self.OnIdle)
def show_msgdialog(self):
"""
Check for log messages and show them
see: main.wxGlade.OnIdle(), log.getBufferAsList(), msgdialog.MessageDialog"""
log_msg = log.getBufferAsString()
if not log_msg:
return
# initialise message dialog
msg_dialog = msgdialog.MessageDialog(None, -1, "")
msg_dialog.msg_list.InsertColumn(0, "")
# clear dialog and show new messages
msg_dialog.msg_list.Freeze()
msg_dialog.msg_list.DeleteAllItems()
for line in log_msg.split('\n'):
msg_dialog.msg_list.Append([line, ])
msg_dialog.msg_list.SetColumnWidth(0, -1)
msg_dialog.msg_list.Thaw()
msg_dialog.ShowModal()
msg_dialog.Destroy()
def graphical_exception_handler(self, exc_type, exc_value, exc_tb):
"""Show detailed information about uncaught exceptions in bugdialog.BugReport.
The shown exception will be logged to the log file in parallel.
The exception information will be cleared after the bug dialog has closed.
exc_type: Type of the exception (normally a class object)
exc_value: The "value" of the exception
exc_tb: Call stack of the exception
see: bugdialog.BugReport(), bugdialog.Show()"""
bugdialog.ShowEI(exc_type, exc_value, exc_tb)
if compat.PYTHON2: sys.exc_clear()
def exception(self, msg, *args, **kwargs):
"""Graphical replacement of logging.exception().
All exception details logged with logging.exception() will be shown in bugdialog.BugReport.
The shown exception will be logged to the log file in parallel.
The exception information will be cleared after the bug dialog has closed.
msg: Short description of the exception
see: bugdialog.BugReport, bugdialog.ShowEI()"""
if args:
try:
msg = msg % args
except TypeError:
log.exception_orig(_('Wrong format of a log message'))
(exc_type, exc_value, exc_tb) = sys.exc_info()
bugdialog.ShowEI(exc_type, exc_value, exc_tb, msg)
if compat.PYTHON2: sys.exc_clear()
def main(filename=None):
"if filename is not None, loads it"
logging.info(_("Using wxPython %s"), config.wx_version)
common.history = history.History()
app = wxGlade()
if filename is not None:
win = app.GetTopWindow()
if os.path.splitext(filename)[1].upper() == ".XRC":
win.import_xrc(filename)
else:
win._open_app(filename, False)
# mainly for debugging we want the first window to be opened already
if filename and config.open_design_window and common.root.children:
editor = common.root.children[0]
misc.set_focused_widget(editor)
editor.create()
common.app_tree.ExpandAllChildren(editor.item)
win.cur_dir = os.path.dirname(filename)
#win = app.GetTopWindow()
##win.import_xrc(r"D:\Python\Sources35\wxglade\wxglade_dev\tests\casefiles\CalendarCtrl.xrc")
#win.import_xrc(r"D:\Python\Sources35\wxglade\wxglade_dev\tests\casefiles\AllWidgets_30.xrc")
app.MainLoop()
|
_run.py
|
from profil3r.core.colors import Colors
import threading
def run(self):
self.print_logo()
# Get arguments from the command line
self.parse_arguments()
self.menu()
self.get_permutations()
# Number of permutations to test per service
print(Colors.BOLD + "[+]" + Colors.ENDC + " {} permutations to test for each service, you can reduce this number by selecting fewer options if it takes too long".format(len(self.permutations_list)))
modules = self.get_report_modules()
print("\n" + "Profil3r will search : \n " + Colors.BOLD + "[+] " + Colors.ENDC + "{} \n".format(str('\n ' + Colors.BOLD + "[+] " + Colors.ENDC).join(modules)))
for module in modules:
# each report module runs in its own thread, but join() is called right away,
# so the modules are effectively processed one after another
thread = threading.Thread(target=self.modules[module]["method"])
thread.start()
thread.join()
self.generate_report()
|
api.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.api - api
"""
import os
import logging
import re
import datetime
import time
import json
import cherrypy
import locale
from threading import Thread
try:
import win32api
import win32file
except ImportError:
pass
import sabnzbd
from sabnzbd.constants import VALID_ARCHIVES, VALID_NZB_FILES, Status, \
TOP_PRIORITY, REPAIR_PRIORITY, HIGH_PRIORITY, NORMAL_PRIORITY, LOW_PRIORITY, \
KIBI, MEBI, GIGI, JOB_ADMIN
import sabnzbd.config as config
import sabnzbd.cfg as cfg
from sabnzbd.downloader import Downloader
from sabnzbd.nzbqueue import NzbQueue
import sabnzbd.scheduler as scheduler
from sabnzbd.skintext import SKIN_TEXT
from sabnzbd.utils.pathbrowser import folders_at_path
from sabnzbd.utils.getperformance import getcpu
from sabnzbd.misc import loadavg, to_units, int_conv, time_format, \
cat_convert, create_https_certificates, calc_age
from sabnzbd.filesystem import diskspace, get_ext, get_filename, globber, \
globber_full, clip_path, remove_all
from sabnzbd.filesystem import same_file
from sabnzbd.encoding import xml_name
from sabnzbd.postproc import PostProcessor
from sabnzbd.articlecache import ArticleCache
from sabnzbd.utils.servertests import test_nntp_server_dict
from sabnzbd.bpsmeter import BPSMeter
from sabnzbd.rating import Rating
from sabnzbd.getipaddress import localipv4, publicipv4, ipv6, addresslookup
from sabnzbd.newsunpack import userxbit
from sabnzbd.database import build_history_info, unpack_history_info, HistoryDB
import sabnzbd.notifier
import sabnzbd.rss
import sabnzbd.emailer
##############################################################################
# API error messages
##############################################################################
_MSG_NO_VALUE = 'expect one parameter'
_MSG_NO_VALUE2 = 'expect two parameters'
_MSG_INT_VALUE = 'expect integer value'
_MSG_NO_ITEM = 'item does not exist'
_MSG_NOT_IMPLEMENTED = 'not implemented'
_MSG_NO_FILE = 'no file given'
_MSG_NO_PATH = 'file does not exist'
_MSG_OUTPUT_FORMAT = 'Format not supported'
_MSG_NO_SUCH_CONFIG = 'Config item does not exist'
_MSG_BAD_SERVER_PARMS = 'Incorrect server settings'
# For Windows: determine executable extensions
if os.name == 'nt':
PATHEXT = os.environ.get('PATHEXT', '').lower().split(';')
else:
PATHEXT = []
def api_handler(kwargs):
""" API Dispatcher """
mode = kwargs.get('mode', '')
output = kwargs.get('output')
name = kwargs.get('name', '')
if isinstance(mode, list):
mode = mode[0]
if isinstance(output, list):
output = output[0]
response = _api_table.get(mode, (_api_undefined, 2))[0](name, output, kwargs)
return response
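# --- Illustrative dispatch sketch (added comment; the nzo_id value is hypothetical) ---
# A call such as
#     api_handler({'mode': 'queue', 'name': 'pause', 'output': 'json', 'value': 'SABnzbd_nzo_123'})
# looks up 'queue' in _api_table (defined below) and calls _api_queue(name, output, kwargs),
# which in turn dispatches 'pause' through _api_queue_table to _api_queue_pause(),
# so the named queue item is paused and a JSON status report is returned.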
def _api_get_config(name, output, kwargs):
""" API: accepts output, keyword, section """
_, data = config.get_dconfig(kwargs.get('section'), kwargs.get('keyword'))
return report(output, keyword='config', data=data)
def _api_set_config(name, output, kwargs):
""" API: accepts output, keyword, section """
if kwargs.get('section') == 'servers':
kwargs['keyword'] = handle_server_api(output, kwargs)
elif kwargs.get('section') == 'rss':
kwargs['keyword'] = handle_rss_api(output, kwargs)
elif kwargs.get('section') == 'categories':
kwargs['keyword'] = handle_cat_api(output, kwargs)
else:
res = config.set_config(kwargs)
if not res:
return report(output, _MSG_NO_SUCH_CONFIG)
config.save_config()
res, data = config.get_dconfig(kwargs.get('section'), kwargs.get('keyword'))
return report(output, keyword='config', data=data)
def _api_set_config_default(name, output, kwargs):
""" API: Reset requested config variables back to defaults. Currently only for misc-section """
keywords = kwargs.get('keyword', [])
if not isinstance(keywords, list):
keywords = [keywords]
for keyword in keywords:
item = config.get_config('misc', keyword)
if item:
item.set(item.default())
config.save_config()
return report(output)
def _api_del_config(name, output, kwargs):
""" API: accepts output, keyword, section """
if del_from_section(kwargs):
return report(output)
else:
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_qstatus(name, output, kwargs):
""" API: accepts output """
info, pnfo_list, bytespersec = build_queue()
return report(output, data=info)
def _api_queue(name, output, kwargs):
""" API: Dispatcher for mode=queue """
value = kwargs.get('value', '')
return _api_queue_table.get(name, (_api_queue_default, 2))[0](output, value, kwargs)
def _api_queue_delete(output, value, kwargs):
""" API: accepts output, value """
if value.lower() == 'all':
removed = NzbQueue.do.remove_all(kwargs.get('search'))
return report(output, keyword='', data={'status': bool(removed), 'nzo_ids': removed})
elif value:
items = value.split(',')
delete_all_data = int_conv(kwargs.get('del_files'))
removed = NzbQueue.do.remove_multiple(items, delete_all_data=delete_all_data)
return report(output, keyword='', data={'status': bool(removed), 'nzo_ids': removed})
else:
return report(output, _MSG_NO_VALUE)
def _api_queue_delete_nzf(output, value, kwargs):
""" API: accepts value(=nzo_id), value2(=nzf_id) """
value2 = kwargs.get('value2')
if value and value2:
removed = NzbQueue.do.remove_nzf(value, value2, force_delete=True)
return report(output, keyword='', data={'status': bool(removed), 'nzf_ids': removed})
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_rename(output, value, kwargs):
""" API: accepts output, value(=old name), value2(=new name), value3(=password) """
value2 = kwargs.get('value2')
value3 = kwargs.get('value3')
if value and value2:
ret = NzbQueue.do.change_name(value, value2, value3)
return report(output, keyword='', data={'status': ret})
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_change_complete_action(output, value, kwargs):
""" API: accepts output, value(=action) """
sabnzbd.change_queue_complete_action(value)
return report(output)
def _api_queue_purge(output, value, kwargs):
""" API: accepts output """
removed = NzbQueue.do.remove_all(kwargs.get('search'))
return report(output, keyword='', data={'status': bool(removed), 'nzo_ids': removed})
def _api_queue_pause(output, value, kwargs):
""" API: accepts output, value(=list of nzo_id) """
if value:
items = value.split(',')
handled = NzbQueue.do.pause_multiple_nzo(items)
else:
handled = False
return report(output, keyword='', data={'status': bool(handled), 'nzo_ids': handled})
def _api_queue_resume(output, value, kwargs):
""" API: accepts output, value(=list of nzo_id) """
if value:
items = value.split(',')
handled = NzbQueue.do.resume_multiple_nzo(items)
else:
handled = False
return report(output, keyword='', data={'status': bool(handled), 'nzo_ids': handled})
def _api_queue_priority(output, value, kwargs):
""" API: accepts output, value(=nzo_id), value2(=priority) """
value2 = kwargs.get('value2')
if value and value2:
try:
try:
priority = int(value2)
except:
return report(output, _MSG_INT_VALUE)
pos = NzbQueue.do.set_priority(value, priority)
# Returns the position in the queue, -1 is incorrect job-id
return report(output, keyword='position', data=pos)
except:
return report(output, _MSG_NO_VALUE2)
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_sort(output, value, kwargs):
""" API: accepts output, sort, dir """
sort = kwargs.get('sort')
direction = kwargs.get('dir', '')
if sort:
NzbQueue.do.sort_queue(sort, direction)
return report(output)
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_default(output, value, kwargs):
""" API: accepts output, sort, dir, start, limit """
start = int_conv(kwargs.get('start'))
limit = int_conv(kwargs.get('limit'))
search = kwargs.get('search')
info, pnfo_list, bytespersec = build_queue(start=start, limit=limit, output=output, search=search)
return report(output, keyword='queue', data=info)
def _api_queue_rating(output, value, kwargs):
""" API: accepts output, value(=nzo_id), type, setting, detail """
vote_map = {'up': Rating.VOTE_UP, 'down': Rating.VOTE_DOWN}
flag_map = {'spam': Rating.FLAG_SPAM, 'encrypted': Rating.FLAG_ENCRYPTED, 'expired': Rating.FLAG_EXPIRED, 'other': Rating.FLAG_OTHER, 'comment': Rating.FLAG_COMMENT}
content_type = kwargs.get('type')
setting = kwargs.get('setting')
if value:
try:
video = audio = vote = flag = None
if content_type == 'video' and setting != "-":
video = setting
if content_type == 'audio' and setting != "-":
audio = setting
if content_type == 'vote':
vote = vote_map[setting]
if content_type == 'flag':
flag = flag_map[setting]
if cfg.rating_enable():
Rating.do.update_user_rating(value, video, audio, vote, flag, kwargs.get('detail'))
return report(output)
except:
return report(output, _MSG_BAD_SERVER_PARMS)
else:
return report(output, _MSG_NO_VALUE)
def _api_options(name, output, kwargs):
""" API: accepts output """
return options_list(output)
def _api_translate(name, output, kwargs):
""" API: accepts output, value(=acronym) """
return report(output, keyword='value', data=T(kwargs.get('value', '')))
def _api_addfile(name, output, kwargs):
""" API: accepts name, output, pp, script, cat, priority, nzbname """
# Normal upload will send the nzb in a kw arg called nzbfile
if name is None or isinstance(name, str):
name = kwargs.get('nzbfile')
if hasattr(name, 'getvalue'):
# Side effect of next line is that attribute .value is created
# which is needed to make add_nzbfile() work
size = name.length
elif hasattr(name, 'file') and hasattr(name, 'filename') and name.filename:
# CherryPy 3.2.2 object
if hasattr(name.file, 'file'):
name.value = name.file.file.read()
else:
name.value = name.file.read()
size = len(name.value)
elif hasattr(name, 'value'):
size = len(name.value)
else:
size = 0
if name is not None and size and name.filename:
cat = kwargs.get('cat')
xcat = kwargs.get('xcat')
if not cat and xcat:
# Indexer category, so do mapping
cat = cat_convert(xcat)
res = sabnzbd.add_nzbfile(name, kwargs.get('pp'), kwargs.get('script'), cat,
kwargs.get('priority'), kwargs.get('nzbname'))
return report(output, keyword='', data={'status': res[0] == 0, 'nzo_ids': res[1]}, compat=True)
else:
return report(output, _MSG_NO_VALUE)
def _api_retry(name, output, kwargs):
""" API: accepts name, output, value(=nzo_id), nzbfile(=optional NZB), password (optional) """
value = kwargs.get('value')
# Normal upload will send the nzb in a kw arg called nzbfile
if name is None or isinstance(name, str):
name = kwargs.get('nzbfile')
password = kwargs.get('password')
password = password[0] if isinstance(password, list) else password
nzo_id = retry_job(value, name, password)
if nzo_id:
if isinstance(nzo_id, list):
nzo_id = nzo_id[0]
return report(output, keyword='', data={'status': True, 'nzo_id': nzo_id})
else:
return report(output, _MSG_NO_ITEM)
def _api_cancel_pp(name, output, kwargs):
""" API: accepts name, output, value(=nzo_id) """
nzo_id = kwargs.get('value')
if PostProcessor.do.cancel_pp(nzo_id):
return report(output, keyword='', data={'status': True, 'nzo_id': nzo_id})
else:
return report(output, _MSG_NO_ITEM)
def _api_addlocalfile(name, output, kwargs):
""" API: accepts name, output, pp, script, cat, priority, nzbname """
if name and isinstance(name, list):
name = name[0]
if name:
if os.path.exists(name):
fn = get_filename(name)
if fn:
pp = kwargs.get('pp')
script = kwargs.get('script')
cat = kwargs.get('cat')
xcat = kwargs.get('xcat')
if not cat and xcat:
# Indexer category, so do mapping
cat = cat_convert(xcat)
priority = kwargs.get('priority')
nzbname = kwargs.get('nzbname')
if get_ext(name) in VALID_ARCHIVES:
res = sabnzbd.dirscanner.process_nzb_archive_file(
fn, name, pp=pp, script=script, cat=cat, priority=priority, keep=True, nzbname=nzbname)
elif get_ext(name) in VALID_NZB_FILES:
res = sabnzbd.dirscanner.process_single_nzb(
fn, name, pp=pp, script=script, cat=cat, priority=priority, keep=True, nzbname=nzbname)
else:
logging.info('API-call addlocalfile: "%s" not a proper file name', name)
return report(output, _MSG_NO_FILE)
else:
logging.info('API-call addlocalfile: file "%s" not found', name)
return report(output, _MSG_NO_PATH)
return report(output, keyword='', data={'status': res[0] == 0, 'nzo_ids': res[1]}, compat=True)
else:
logging.info('API-call addlocalfile: no file name given')
return report(output, _MSG_NO_VALUE)
def _api_switch(name, output, kwargs):
""" API: accepts output, value(=first id), value2(=second id) """
value = kwargs.get('value')
value2 = kwargs.get('value2')
if value and value2:
pos, prio = NzbQueue.do.switch(value, value2)
# Returns the new position and new priority (if different)
return report(output, keyword='result', data={'position': pos, 'priority': prio})
else:
return report(output, _MSG_NO_VALUE2)
def _api_change_cat(name, output, kwargs):
""" API: accepts output, value(=nzo_id), value2(=category) """
value = kwargs.get('value')
value2 = kwargs.get('value2')
if value and value2:
nzo_id = value
cat = value2
if cat == 'None':
cat = None
result = NzbQueue.do.change_cat(nzo_id, cat)
return report(output, keyword='status', data=bool(result > 0))
else:
return report(output, _MSG_NO_VALUE)
def _api_change_script(name, output, kwargs):
""" API: accepts output, value(=nzo_id), value2(=script) """
value = kwargs.get('value')
value2 = kwargs.get('value2')
if value and value2:
nzo_id = value
script = value2
if script.lower() == 'none':
script = None
result = NzbQueue.do.change_script(nzo_id, script)
return report(output, keyword='status', data=bool(result > 0))
else:
return report(output, _MSG_NO_VALUE)
def _api_change_opts(name, output, kwargs):
""" API: accepts output, value(=nzo_id), value2(=pp) """
value = kwargs.get('value')
value2 = kwargs.get('value2')
result = 0
if value and value2 and value2.isdigit():
result = NzbQueue.do.change_opts(value, int(value2))
return report(output, keyword='status', data=bool(result > 0))
def _api_fullstatus(name, output, kwargs):
""" API: full history status"""
status = build_status(skip_dashboard=kwargs.get('skip_dashboard', 1), output=output)
return report(output, keyword='status', data=status)
def _api_history(name, output, kwargs):
""" API: accepts output, value(=nzo_id), start, limit, search """
value = kwargs.get('value', '')
start = int_conv(kwargs.get('start'))
limit = int_conv(kwargs.get('limit'))
last_history_update = int_conv(kwargs.get('last_history_update', 0))
search = kwargs.get('search')
failed_only = kwargs.get('failed_only')
categories = kwargs.get('category')
# Do we need to send anything?
if last_history_update == sabnzbd.LAST_HISTORY_UPDATE:
return report(output, keyword='history', data=False)
if categories and not isinstance(categories, list):
categories = [categories]
if not limit:
limit = cfg.history_limit()
if name == 'delete':
special = value.lower()
del_files = bool(int_conv(kwargs.get('del_files')))
if special in ('all', 'failed', 'completed'):
history_db = sabnzbd.get_db_connection()
if special in ('all', 'failed'):
if del_files:
del_job_files(history_db.get_failed_paths(search))
history_db.remove_failed(search)
if special in ('all', 'completed'):
history_db.remove_completed(search)
sabnzbd.history_updated()
return report(output)
elif value:
jobs = value.split(',')
for job in jobs:
del_hist_job(job, del_files)
sabnzbd.history_updated()
return report(output)
else:
return report(output, _MSG_NO_VALUE)
elif not name:
history = {}
grand, month, week, day = BPSMeter.do.get_sums()
history['total_size'], history['month_size'], history['week_size'], history['day_size'] = \
to_units(grand), to_units(month), to_units(week), to_units(day)
history['slots'], fetched_items, history['noofslots'] = build_history(start=start,
limit=limit,
search=search, failed_only=failed_only,
categories=categories,
output=output)
history['last_history_update'] = sabnzbd.LAST_HISTORY_UPDATE
history['version'] = sabnzbd.__version__
return report(output, keyword='history', data=history)
else:
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_get_files(name, output, kwargs):
""" API: accepts output, value(=nzo_id) """
value = kwargs.get('value')
if value:
return report(output, keyword='files', data=build_file_list(value))
else:
return report(output, _MSG_NO_VALUE)
def _api_addurl(names, output, kwargs):
""" API: accepts name, output, pp, script, cat, priority, nzbname """
pp = kwargs.get('pp')
script = kwargs.get('script')
cat = kwargs.get('cat')
priority = kwargs.get('priority')
nzbnames = kwargs.get('nzbname')
if not isinstance(names, list):
names = [names]
if not isinstance(nzbnames, list):
nzbnames = [nzbnames]
nzo_ids = []
for n in range(len(names)):
name = names[n]
if n < len(nzbnames):
nzbname = nzbnames[n]
else:
nzbname = ''
if name:
name = name.strip()
if name:
nzo_ids.append(sabnzbd.add_url(name, pp, script, cat, priority, nzbname))
if len(names) > 0:
return report(output, keyword='', data={'status': True, 'nzo_ids': nzo_ids}, compat=True)
else:
logging.info('API-call addurl: no files retrieved from %s', names)
return report(output, _MSG_NO_VALUE)
def _api_pause(name, output, kwargs):
""" API: accepts output """
scheduler.plan_resume(0)
Downloader.do.pause()
return report(output)
def _api_resume(name, output, kwargs):
""" API: accepts output """
scheduler.plan_resume(0)
sabnzbd.unpause_all()
return report(output)
def _api_shutdown(name, output, kwargs):
""" API: accepts output """
sabnzbd.shutdown_program()
return report(output)
def _api_warnings(name, output, kwargs):
""" API: accepts name, output """
if name == 'clear':
return report(output, keyword="warnings", data=sabnzbd.GUIHANDLER.clear())
elif name == 'show':
return report(output, keyword="warnings", data=sabnzbd.GUIHANDLER.content())
elif name:
return report(output, _MSG_NOT_IMPLEMENTED)
return report(output, keyword="warnings", data=sabnzbd.GUIHANDLER.content())
def _api_get_cats(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="categories", data=list_cats(False))
def _api_get_scripts(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="scripts", data=list_scripts())
def _api_version(name, output, kwargs):
""" API: accepts output """
return report(output, keyword='version', data=sabnzbd.__version__)
def _api_auth(name, output, kwargs):
""" API: accepts output """
auth = 'None'
if not cfg.disable_key():
auth = 'badkey'
key = kwargs.get('key', '')
if not key:
auth = 'apikey'
else:
if key == cfg.nzb_key():
auth = 'nzbkey'
if key == cfg.api_key():
auth = 'apikey'
elif cfg.username() and cfg.password():
auth = 'login'
return report(output, keyword='auth', data=auth)
def _api_restart(name, output, kwargs):
""" API: accepts output """
logging.info('Restart requested by API')
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={'timeout': 1}).start()
return report(output)
def _api_restart_repair(name, output, kwargs):
""" API: accepts output """
logging.info('Queue repair requested by API')
sabnzbd.request_repair()
sabnzbd.trigger_restart()
return report(output)
def _api_disconnect(name, output, kwargs):
""" API: accepts output """
Downloader.do.disconnect()
return report(output)
def _api_osx_icon(name, output, kwargs):
""" API: accepts output, value """
value = kwargs.get('value', '1').strip()
cfg.osx_menu.set(value != '0')
return report(output)
def _api_rescan(name, output, kwargs):
""" API: accepts output """
NzbQueue.do.scan_jobs(all=False, action=True)
return report(output)
def _api_eval_sort(name, output, kwargs):
""" API: evaluate sorting expression """
name = kwargs.get('name', '')
value = kwargs.get('value', '')
title = kwargs.get('title')
multipart = kwargs.get('movieextra', '')
path = sabnzbd.sorting.eval_sort(value, title, name, multipart)
if path is None:
return report(output, _MSG_NOT_IMPLEMENTED)
else:
return report(output, keyword='result', data=path)
def _api_watched_now(name, output, kwargs):
""" API: accepts output """
sabnzbd.dirscanner.dirscan()
return report(output)
def _api_resume_pp(name, output, kwargs):
""" API: accepts output """
PostProcessor.do.paused = False
return report(output)
def _api_pause_pp(name, output, kwargs):
""" API: accepts output """
PostProcessor.do.paused = True
return report(output)
def _api_rss_now(name, output, kwargs):
""" API: accepts output """
# Run RSS scan async, because it can take a long time
scheduler.force_rss()
return report(output)
def _api_retry_all(name, output, kwargs):
""" API: Retry all failed items in History """
return report(output, keyword='status', data=retry_all_jobs())
def _api_reset_quota(name, output, kwargs):
""" Reset quota left """
BPSMeter.do.reset_quota(force=True)
return report(output)
def _api_test_email(name, output, kwargs):
""" API: send a test email, return result """
logging.info("Sending test email")
pack = {'download': ['action 1', 'action 2'], 'unpack': ['action 1', 'action 2']}
res = sabnzbd.emailer.endjob('I had a d\xe8ja vu', 'unknown', True,
os.path.normpath(os.path.join(cfg.complete_dir.get_path(), '/unknown/I had a d\xe8ja vu')),
123 * MEBI, None, pack, 'my_script', 'Line 1\nLine 2\nLine 3\nd\xe8ja vu\n', 0,
test=kwargs)
if res == T('Email succeeded'):
res = None
return report(output, error=res)
def _api_test_windows(name, output, kwargs):
""" API: send a test to Windows, return result """
logging.info("Sending test notification")
res = sabnzbd.notifier.send_windows('SABnzbd', T('Test Notification'), 'other')
return report(output, error=res)
def _api_test_notif(name, output, kwargs):
""" API: send a test to Notification Center, return result """
logging.info("Sending test notification")
res = sabnzbd.notifier.send_notification_center('SABnzbd', T('Test Notification'), 'other')
return report(output, error=res)
def _api_test_osd(name, output, kwargs):
""" API: send a test OSD notification, return result """
logging.info("Sending OSD notification")
res = sabnzbd.notifier.send_notify_osd('SABnzbd', T('Test Notification'))
return report(output, error=res)
def _api_test_prowl(name, output, kwargs):
""" API: send a test Prowl notification, return result """
logging.info("Sending Prowl notification")
res = sabnzbd.notifier.send_prowl('SABnzbd', T('Test Notification'), 'other', force=True, test=kwargs)
return report(output, error=res)
def _api_test_pushover(name, output, kwargs):
""" API: send a test Pushover notification, return result """
logging.info("Sending Pushover notification")
res = sabnzbd.notifier.send_pushover('SABnzbd', T('Test Notification'), 'other', force=True, test=kwargs)
return report(output, error=res)
def _api_test_pushbullet(name, output, kwargs):
""" API: send a test Pushbullet notification, return result """
logging.info("Sending Pushbullet notification")
res = sabnzbd.notifier.send_pushbullet('SABnzbd', T('Test Notification'), 'other', force=True, test=kwargs)
return report(output, error=res)
def _api_test_nscript(name, output, kwargs):
""" API: execute a test notification script, return result """
logging.info("Executing notification script")
res = sabnzbd.notifier.send_nscript('SABnzbd', T('Test Notification'), 'other', force=True, test=kwargs)
return report(output, error=res)
def _api_undefined(name, output, kwargs):
""" API: accepts output """
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_browse(name, output, kwargs):
""" Return tree of local path """
compact = kwargs.get('compact')
if compact and compact == '1':
name = kwargs.get('term', '')
paths = [entry['path'] for entry in folders_at_path(os.path.dirname(name)) if 'path' in entry]
return report(output, keyword='', data=paths)
else:
show_hidden = kwargs.get('show_hidden_folders')
paths = folders_at_path(name, True, show_hidden)
return report(output, keyword='paths', data=paths)
def _api_config(name, output, kwargs):
""" API: Dispatcher for "config" """
return _api_config_table.get(name, (_api_config_undefined, 2))[0](output, kwargs)
def _api_config_speedlimit(output, kwargs):
""" API: accepts output, value(=speed) """
value = kwargs.get('value')
if not value:
value = '0'
Downloader.do.limit_speed(value)
return report(output)
def _api_config_get_speedlimit(output, kwargs):
""" API: accepts output """
return report(output, keyword='speedlimit', data=Downloader.do.get_limit())
def _api_config_set_colorscheme(output, kwargs):
""" API: accepts output"""
value = kwargs.get('value')
if value:
cfg.web_color.set(value)
return report(output)
else:
return report(output, _MSG_NO_VALUE)
def _api_config_set_pause(output, kwargs):
""" API: accepts output, value(=pause interval) """
value = kwargs.get('value')
scheduler.plan_resume(int_conv(value))
return report(output)
def _api_config_set_apikey(output, kwargs):
""" API: accepts output """
cfg.api_key.set(config.create_api_key())
config.save_config()
return report(output, keyword='apikey', data=cfg.api_key())
def _api_config_set_nzbkey(output, kwargs):
""" API: accepts output """
cfg.nzb_key.set(config.create_api_key())
config.save_config()
return report(output, keyword='nzbkey', data=cfg.nzb_key())
def _api_config_regenerate_certs(output, kwargs):
# Make sure we only over-write default locations
result = False
if sabnzbd.cfg.https_cert() == sabnzbd.cfg.https_cert.default() and sabnzbd.cfg.https_key() == sabnzbd.cfg.https_key.default():
https_cert = sabnzbd.cfg.https_cert.get_path()
https_key = sabnzbd.cfg.https_key.get_path()
result = create_https_certificates(https_cert, https_key)
sabnzbd.RESTART_REQ = True
return report(output, data=result)
def _api_config_test_server(output, kwargs):
""" API: accepts output, server-params """
result, msg = test_nntp_server_dict(kwargs)
response = {'result': result, 'message': msg}
if output:
return report(output, data=response)
else:
return msg
def _api_config_undefined(output, kwargs):
""" API: accepts output """
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_server_stats(name, output, kwargs):
""" API: accepts output """
sum_t, sum_m, sum_w, sum_d = BPSMeter.do.get_sums()
stats = {'total': sum_t, 'month': sum_m, 'week': sum_w, 'day': sum_d, 'servers': {}}
for svr in config.get_servers():
t, m, w, d, daily = BPSMeter.do.amounts(svr)
stats['servers'][svr] = {'total': t or 0, 'month': m or 0, 'week': w or 0, 'day': d or 0, 'daily': daily or {}}
return report(output, keyword='', data=stats)
##############################################################################
_api_table = {
'server_stats': (_api_server_stats, 2),
'get_config': (_api_get_config, 3),
'set_config': (_api_set_config, 3),
'set_config_default': (_api_set_config_default, 3),
'del_config': (_api_del_config, 3),
'qstatus': (_api_qstatus, 2),
'queue': (_api_queue, 2),
'options': (_api_options, 2),
'translate': (_api_translate, 2),
'addfile': (_api_addfile, 1),
'retry': (_api_retry, 2),
'cancel_pp': (_api_cancel_pp, 2),
'addlocalfile': (_api_addlocalfile, 1),
'switch': (_api_switch, 2),
'change_cat': (_api_change_cat, 2),
'change_script': (_api_change_script, 2),
'change_opts': (_api_change_opts, 2),
'fullstatus': (_api_fullstatus, 2),
'history': (_api_history, 2),
'get_files': (_api_get_files, 2),
'addurl': (_api_addurl, 1),
'addid': (_api_addurl, 1),
'pause': (_api_pause, 2),
'resume': (_api_resume, 2),
'shutdown': (_api_shutdown, 3),
'warnings': (_api_warnings, 2),
'config': (_api_config, 2),
'get_cats': (_api_get_cats, 2),
'get_scripts': (_api_get_scripts, 2),
'version': (_api_version, 1),
'auth': (_api_auth, 1),
'restart': (_api_restart, 3),
'restart_repair': (_api_restart_repair, 2),
'disconnect': (_api_disconnect, 2),
'osx_icon': (_api_osx_icon, 3),
'rescan': (_api_rescan, 2),
'eval_sort': (_api_eval_sort, 2),
'watched_now': (_api_watched_now, 2),
'resume_pp': (_api_resume_pp, 2),
'pause_pp': (_api_pause_pp, 2),
'rss_now': (_api_rss_now, 2),
'browse': (_api_browse, 2),
'retry_all': (_api_retry_all, 2),
'reset_quota': (_api_reset_quota, 2),
'test_email': (_api_test_email, 2),
'test_windows': (_api_test_windows, 2),
'test_notif': (_api_test_notif, 2),
'test_osd': (_api_test_osd, 2),
'test_pushover': (_api_test_pushover, 2),
'test_pushbullet': (_api_test_pushbullet, 2),
'test_prowl': (_api_test_prowl, 2),
'test_nscript': (_api_test_nscript, 2),
}
_api_queue_table = {
'delete': (_api_queue_delete, 2),
'delete_nzf': (_api_queue_delete_nzf, 2),
'rename': (_api_queue_rename, 2),
'change_complete_action': (_api_queue_change_complete_action, 2),
'purge': (_api_queue_purge, 2),
'pause': (_api_queue_pause, 2),
'resume': (_api_queue_resume, 2),
'priority': (_api_queue_priority, 2),
'sort': (_api_queue_sort, 2),
'rating': (_api_queue_rating, 2)
}
_api_config_table = {
'speedlimit': (_api_config_speedlimit, 2),
'set_speedlimit': (_api_config_speedlimit, 2),
'get_speedlimit': (_api_config_get_speedlimit, 2),
'set_colorscheme': (_api_config_set_colorscheme, 2),
'set_pause': (_api_config_set_pause, 2),
'set_apikey': (_api_config_set_apikey, 3),
'set_nzbkey': (_api_config_set_nzbkey, 3),
'regenerate_certs': (_api_config_regenerate_certs, 3),
'test_server': (_api_config_test_server, 2)
}
def api_level(cmd, name):
""" Return access level required for this API call """
if cmd in _api_table:
return _api_table[cmd][1]
if name == 'queue' and cmd in _api_queue_table:
return _api_queue_table[cmd][1]
if name == 'config' and cmd in _api_config_table:
return _api_config_table[cmd][1]
return 4
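# Editor's note: a minimal sketch (not part of the SABnzbd API) of how api_level()
# and the dispatch tables above are typically combined to gate an incoming call.
# 'user_level' and the error text are assumptions; real handlers such as
# _api_server_stats take extra positional arguments that this sketch ignores.
def _example_gated_call(cmd, name, user_level, output, kwargs):
    if api_level(cmd, name) > user_level:
        return report(output, error='API key does not grant access to this call')
    handler = _api_table.get(cmd, (_api_config_undefined, 2))[0]
    return handler(output, kwargs)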
def report(output, error=None, keyword='value', data=None, compat=False):
""" Report message in json, xml or plain text
        If error is set, only a status/error report is made.
        If no error and no data, only a status report is made.
        Else, a data report is made (optional 'keyword' for the outer XML section).
        'compat' is a special case for backward-compatible ASCII output
"""
if output == 'json':
content = "application/json;charset=UTF-8"
if error:
info = {'status': False, 'error': error}
elif data is None:
info = {'status': True}
else:
if hasattr(data, '__iter__') and not keyword:
info = data
else:
info = {keyword: data}
response = json.dumps(info).encode('utf-8')
elif output == 'xml':
if not keyword:
# xml always needs an outer keyword, even when json doesn't
keyword = 'result'
content = "text/xml"
xmlmaker = xml_factory()
if error:
status_str = xmlmaker.run('result', {'status': False, 'error': error})
elif data is None:
status_str = xmlmaker.run('result', {'status': True})
else:
status_str = xmlmaker.run(keyword, data)
response = '<?xml version="1.0" encoding="UTF-8" ?>\n%s\n' % status_str
else:
content = "text/plain"
if error:
response = "error: %s\n" % error
elif compat or data is None:
response = 'ok\n'
else:
response = '%s\n' % str(data)
cherrypy.response.headers['Content-Type'] = content
cherrypy.response.headers['Pragma'] = 'no-cache'
return response
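# Editor's note: an illustrative sketch (not part of SABnzbd) of the three output
# shapes report() produces; the payload is an assumption and the calls must run
# inside a CherryPy request because report() sets response headers.
def _example_report_shapes():
    payload = {'paused': True}
    as_json = report('json', keyword='queue', data=payload)  # b'{"queue": {"paused": true}}'
    as_xml = report('xml', keyword='queue', data=payload)    # '<?xml ...?>\n<queue><paused>True</paused>\n</queue>\n'
    as_text = report('text', error='missing value')          # 'error: missing value\n'
    return as_json, as_xml, as_text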
class xml_factory:
""" Recursive xml string maker. Feed it a mixed tuple/dict/item object and will output into an xml string
Current limitations:
In Two tiered lists hard-coded name of "item": <cat_list><item> </item></cat_list>
In Three tiered lists hard-coded name of "slot": <tier1><slot><tier2> </tier2></slot></tier1>
"""
def __init__(self):
self.__text = ''
def _tuple(self, keyw, lst):
text = []
for item in lst:
text.append(self.run(keyw, item))
return ''.join(text)
def _dict(self, keyw, lst):
text = []
for key in lst.keys():
text.append(self.run(key, lst[key]))
if keyw:
return '<%s>%s</%s>\n' % (keyw, ''.join(text), keyw)
else:
return ''
def _list(self, keyw, lst):
text = []
for cat in lst:
if isinstance(cat, dict):
text.append(self._dict(plural_to_single(keyw, 'slot'), cat))
elif isinstance(cat, list):
text.append(self._list(plural_to_single(keyw, 'list'), cat))
elif isinstance(cat, tuple):
text.append(self._tuple(plural_to_single(keyw, 'tuple'), cat))
else:
if not isinstance(cat, str):
cat = str(cat)
name = plural_to_single(keyw, 'item')
text.append('<%s>%s</%s>\n' % (name, xml_name(cat), name))
if keyw:
return '<%s>%s</%s>\n' % (keyw, ''.join(text), keyw)
else:
return ''
def run(self, keyw, lst):
if isinstance(lst, dict):
text = self._dict(keyw, lst)
elif isinstance(lst, list):
text = self._list(keyw, lst)
elif isinstance(lst, tuple):
text = self._tuple(keyw, lst)
elif keyw:
text = '<%s>%s</%s>\n' % (keyw, xml_name(lst), keyw)
else:
text = ''
return text
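# Editor's note: a small usage sketch (not part of SABnzbd) for xml_factory; the
# 'cat_list' keyword mirrors the docstring example and falls back to the hard-coded
# "item" element name because it has no entry in _PLURAL_TO_SINGLE below.
def _example_xml_factory():
    maker = xml_factory()
    # roughly: '<cat_list><item>movies</item>\n<item>tv</item>\n</cat_list>\n'
    return maker.run('cat_list', ['movies', 'tv'])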
def handle_server_api(output, kwargs):
""" Special handler for API-call 'set_config' [servers] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if name:
server = config.get_config('servers', name)
if server:
server.set_dict(kwargs)
old_name = name
else:
config.ConfigServer(name, kwargs)
old_name = None
Downloader.do.update_server(old_name, name)
return name
def handle_rss_api(output, kwargs):
""" Special handler for API-call 'set_config' [rss] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if not name:
return None
feed = config.get_config('rss', name)
if feed:
feed.set_dict(kwargs)
else:
config.ConfigRSS(name, kwargs)
action = kwargs.get('filter_action')
if action in ('add', 'update'):
# Use the general function, but catch the redirect-raise
try:
kwargs['feed'] = name
sabnzbd.interface.ConfigRss('/').internal_upd_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
elif action == 'delete':
# Use the general function, but catch the redirect-raise
try:
kwargs['feed'] = name
sabnzbd.interface.ConfigRss('/').internal_del_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
return name
def handle_cat_api(output, kwargs):
""" Special handler for API-call 'set_config' [categories] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if not name:
return None
feed = config.get_config('categories', name)
if feed:
feed.set_dict(kwargs)
else:
config.ConfigCat(name, kwargs)
return name
def build_status(skip_dashboard=False, output=None):
# build up header full of basic information
info = build_header(trans_functions=not output)
info['logfile'] = sabnzbd.LOGFILE
info['weblogfile'] = sabnzbd.WEBLOGFILE
info['loglevel'] = str(cfg.log_level())
info['folders'] = NzbQueue.do.scan_jobs(all=False, action=False)
info['configfn'] = config.get_filename()
# Dashboard: Speed of System
info['cpumodel'] = getcpu()
info['pystone'] = sabnzbd.PYSTONE_SCORE
# Dashboard: Speed of Download directory:
info['downloaddir'] = cfg.download_dir.get_clipped_path()
info['downloaddirspeed'] = sabnzbd.DOWNLOAD_DIR_SPEED
# Dashboard: Speed of Complete directory:
info['completedir'] = cfg.complete_dir.get_clipped_path()
info['completedirspeed'] = sabnzbd.COMPLETE_DIR_SPEED
# Dashboard: Measured download-speed
info['internetbandwidth'] = sabnzbd.INTERNET_BANDWIDTH
# Dashboard: Connection information
if not int_conv(skip_dashboard):
info['localipv4'] = localipv4()
info['publicipv4'] = publicipv4()
info['ipv6'] = ipv6()
# Dashboard: DNS-check
try:
addresslookup(cfg.selftest_host())
info['dnslookup'] = "OK"
except:
info['dnslookup'] = None
info['servers'] = []
servers = sorted(Downloader.do.servers[:], key=lambda svr: '%02d%s' % (svr.priority, svr.displayname.lower()))
for server in servers:
serverconnections = []
connected = 0
for nw in server.idle_threads[:]:
if nw.connected:
connected += 1
for nw in server.busy_threads[:]:
article = nw.article
art_name = ""
nzf_name = ""
nzo_name = ""
if article:
nzf = article.nzf
nzo = nzf.nzo
art_name = article.article
# filename field is not always present
try:
nzf_name = nzf.filename
except: # attribute error
nzf_name = nzf.subject
nzo_name = nzo.final_name
# For the templates or for JSON
if output:
thread_info = {'thrdnum': nw.thrdnum,
'art_name': art_name,
'nzf_name': nzf_name,
'nzo_name': nzo_name}
serverconnections.append(thread_info)
else:
serverconnections.append((nw.thrdnum, art_name, nzf_name, nzo_name))
if nw.connected:
connected += 1
if server.warning and not (connected or server.errormsg):
connected = server.warning
if server.request and not server.info:
            connected = T('&nbsp;Resolving address').replace('&nbsp;', '')
# For the templates or for JSON
if output:
server_info = {'servername': server.displayname,
'serveractiveconn': connected,
'servertotalconn': server.threads,
'serverconnections': serverconnections,
'serverssl': server.ssl,
'serversslinfo': server.ssl_info,
'serveractive': server.active,
'servererror': server.errormsg,
'serverpriority': server.priority,
'serveroptional': server.optional}
info['servers'].append(server_info)
else:
info['servers'].append((server.displayname, '', connected, serverconnections, server.ssl,
server.active, server.errormsg, server.priority, server.optional))
info['warnings'] = sabnzbd.GUIHANDLER.content()
return info
def build_queue(start=0, limit=0, trans=False, output=None, search=None):
# build up header full of basic information
info, pnfo_list, bytespersec, q_size, bytes_left_previous_page = build_queue_header(search=search, start=start, limit=limit, output=output)
datestart = datetime.datetime.now()
priorities = {TOP_PRIORITY: 'Force', REPAIR_PRIORITY: 'Repair', HIGH_PRIORITY: 'High', NORMAL_PRIORITY: 'Normal', LOW_PRIORITY: 'Low'}
limit = int_conv(limit)
start = int_conv(start)
info['refresh_rate'] = str(cfg.refresh_rate()) if cfg.refresh_rate() > 0 else ''
info['scripts'] = list_scripts()
info['categories'] = list_cats(output is None)
info['rating_enable'] = bool(cfg.rating_enable())
info['noofslots'] = q_size
info['start'] = start
info['limit'] = limit
info['finish'] = info['start'] + info['limit']
n = start
running_bytes = bytes_left_previous_page
slotinfo = []
for pnfo in pnfo_list:
nzo_id = pnfo.nzo_id
bytesleft = pnfo.bytes_left
bytes = pnfo.bytes
average_date = pnfo.avg_date
is_propagating = (pnfo.avg_stamp + float(cfg.propagation_delay() * 60)) > time.time()
status = pnfo.status
priority = pnfo.priority
mbleft = (bytesleft / MEBI)
mb = (bytes / MEBI)
slot = {'index': n, 'nzo_id': str(nzo_id)}
slot['unpackopts'] = str(sabnzbd.opts_to_pp(pnfo.repair, pnfo.unpack, pnfo.delete))
slot['priority'] = priorities[priority] if priority >= LOW_PRIORITY else priorities[NORMAL_PRIORITY]
slot['script'] = pnfo.script if pnfo.script else 'None'
slot['filename'] = pnfo.filename
slot['password'] = pnfo.password if pnfo.password else ''
slot['cat'] = pnfo.category if pnfo.category else 'None'
slot['mbleft'] = "%.2f" % mbleft
slot['mb'] = "%.2f" % mb
slot['size'] = format_bytes(bytes)
slot['sizeleft'] = format_bytes(bytesleft)
slot['percentage'] = "%s" % (int(((mb - mbleft) / mb) * 100)) if mb != mbleft else '0'
slot['mbmissing'] = "%.2f" % (pnfo.bytes_missing / MEBI)
slot['direct_unpack'] = pnfo.direct_unpack
if not output:
slot['mb_fmt'] = locale.format_string('%d', int(mb), True)
slot['mbdone_fmt'] = locale.format_string('%d', int(mb - mbleft), True)
if not Downloader.do.paused and status not in (Status.PAUSED, Status.FETCHING, Status.GRABBING):
if is_propagating:
slot['status'] = Status.PROP
elif status == Status.CHECKING:
slot['status'] = Status.CHECKING
else:
slot['status'] = Status.DOWNLOADING
else:
# Ensure compatibility of API status
if status == Status.DELETED or priority == TOP_PRIORITY:
status = Status.DOWNLOADING
slot['status'] = "%s" % status
if (Downloader.do.paused or Downloader.do.postproc or is_propagating or
status not in (Status.DOWNLOADING, Status.FETCHING, Status.QUEUED)) and priority != TOP_PRIORITY:
slot['timeleft'] = '0:00:00'
slot['eta'] = 'unknown'
else:
running_bytes += bytesleft
slot['timeleft'] = calc_timeleft(running_bytes, bytespersec)
try:
datestart = datestart + datetime.timedelta(seconds=bytesleft / bytespersec)
# new eta format: 16:00 Fri 07 Feb
slot['eta'] = datestart.strftime(time_format('%H:%M %a %d %b'))
except:
datestart = datetime.datetime.now()
slot['eta'] = 'unknown'
# Do not show age when it's not known
if average_date.year < 2000:
slot['avg_age'] = '-'
else:
slot['avg_age'] = calc_age(average_date, bool(trans))
rating = Rating.do.get_rating_by_nzo(nzo_id)
slot['has_rating'] = rating is not None
if rating:
slot['rating_avg_video'] = rating.avg_video
slot['rating_avg_audio'] = rating.avg_audio
slotinfo.append(slot)
n += 1
if slotinfo:
info['slots'] = slotinfo
else:
info['slots'] = []
return info, pnfo_list, bytespersec
def fast_queue():
""" Return paused, bytes_left, bpsnow, time_left """
bytes_left = NzbQueue.do.remaining()
paused = Downloader.do.paused
bpsnow = BPSMeter.do.bps
time_left = calc_timeleft(bytes_left, bpsnow)
return paused, bytes_left, bpsnow, time_left
def build_file_list(nzo_id):
""" Build file lists for specified job
"""
jobs = []
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo:
pnfo = nzo.gather_info(full=True)
finished_files = pnfo.finished_files
active_files = pnfo.active_files
queued_files = pnfo.queued_files
for nzf in finished_files:
jobs.append({'filename': nzf.filename if nzf.filename else nzf.subject,
'mbleft': "%.2f" % (nzf.bytes_left / MEBI),
'mb': "%.2f" % (nzf.bytes / MEBI),
'bytes': "%.2f" % nzf.bytes,
'age': calc_age(nzf.date),
'nzf_id': nzf.nzf_id,
'status': 'finished'})
for nzf in active_files:
jobs.append({'filename': nzf.filename if nzf.filename else nzf.subject,
'mbleft': "%.2f" % (nzf.bytes_left / MEBI),
'mb': "%.2f" % (nzf.bytes / MEBI),
'bytes': "%.2f" % nzf.bytes,
'age': calc_age(nzf.date),
'nzf_id': nzf.nzf_id,
'status': 'active'})
for nzf in queued_files:
jobs.append({'filename': nzf.filename if nzf.filename else nzf.subject,
'set': nzf.setname,
'mbleft': "%.2f" % (nzf.bytes_left / MEBI),
'mb': "%.2f" % (nzf.bytes / MEBI),
'bytes': "%.2f" % nzf.bytes,
'age': calc_age(nzf.date),
'nzf_id': nzf.nzf_id,
'status': 'queued'})
return jobs
def options_list(output):
return report(output, keyword='options', data={
'sabyenc': sabnzbd.decoder.SABYENC_ENABLED,
'par2': sabnzbd.newsunpack.PAR2_COMMAND,
'multipar': sabnzbd.newsunpack.MULTIPAR_COMMAND,
'rar': sabnzbd.newsunpack.RAR_COMMAND,
'zip': sabnzbd.newsunpack.ZIP_COMMAND,
'7zip': sabnzbd.newsunpack.SEVEN_COMMAND,
'nice': sabnzbd.newsunpack.NICE_COMMAND,
'ionice': sabnzbd.newsunpack.IONICE_COMMAND
})
def retry_job(job, new_nzb=None, password=None):
""" Re enter failed job in the download queue """
if job:
history_db = sabnzbd.get_db_connection()
futuretype, url, pp, script, cat = history_db.get_other(job)
if futuretype:
nzo_id = sabnzbd.add_url(url, pp, script, cat)
else:
path = history_db.get_path(job)
nzo_id = NzbQueue.do.repair_job(path, new_nzb, password)
if nzo_id:
# Only remove from history if we repaired something
history_db.remove_history(job)
return nzo_id
return None
def retry_all_jobs():
""" Re enter all failed jobs in the download queue """
# Fetch all retryable folders from History
items = sabnzbd.api.build_history()[0]
nzo_ids = []
for item in items:
if item['retry']:
nzo_ids.append(retry_job(item['nzo_id']))
return nzo_ids
def del_job_files(job_paths):
""" Remove files of each path in the list """
for path in job_paths:
if path and clip_path(path).lower().startswith(cfg.download_dir.get_clipped_path().lower()):
remove_all(path, recursive=True)
def del_hist_job(job, del_files):
""" Remove history element """
if job:
path = PostProcessor.do.get_path(job)
if path:
PostProcessor.do.delete(job, del_files=del_files)
else:
history_db = sabnzbd.get_db_connection()
remove_all(history_db.get_path(job), recursive=True)
history_db.remove_history(job)
def Tspec(txt):
""" Translate special terms """
if txt == 'None':
return T('None')
elif txt in ('Default', '*'):
return T('Default')
else:
return txt
_SKIN_CACHE = {} # Stores pre-translated acronyms
def Ttemplate(txt):
""" Translation function for Skin texts
        This special version is meant for use in interface.py during template processing,
        where it is passed in as the $T function: so { ..., 'T' : Ttemplate, ...}
"""
global _SKIN_CACHE
if txt in _SKIN_CACHE:
return _SKIN_CACHE[txt]
else:
        # We need to replace the " and ' characters to keep the text JS/JSON-string-safe
        # Saving it in the dictionary is 20x faster on the next look-up
        tra = T(SKIN_TEXT.get(txt, txt)).replace('"', '&quot;').replace("'", '&apos;')
_SKIN_CACHE[txt] = tra
return tra
def clear_trans_cache():
""" Clean cache for skin translations """
global _SKIN_CACHE
_SKIN_CACHE = {}
sabnzbd.WEBUI_READY = True
def build_header(webdir='', output=None, trans_functions=True):
""" Build the basic header """
try:
uptime = calc_age(sabnzbd.START)
except:
uptime = "-"
speed_limit = Downloader.do.get_limit()
if speed_limit <= 0:
speed_limit = 100
speed_limit_abs = Downloader.do.get_limit_abs()
if speed_limit_abs <= 0:
speed_limit_abs = ''
diskspace_info = diskspace()
header = {}
# We don't output everything for API
if not output:
# These are functions, and cause problems for JSON
if trans_functions:
header['T'] = Ttemplate
header['Tspec'] = Tspec
header['uptime'] = uptime
header['color_scheme'] = sabnzbd.WEB_COLOR or ''
header['helpuri'] = 'https://sabnzbd.org/wiki/'
header['restart_req'] = sabnzbd.RESTART_REQ
header['pid'] = os.getpid()
header['active_lang'] = cfg.language()
header['my_lcldata'] = clip_path(sabnzbd.DIR_LCLDATA)
header['my_home'] = clip_path(sabnzbd.DIR_HOME)
header['webdir'] = webdir or sabnzbd.WEB_DIR
header['url_base'] = cfg.url_base()
header['nt'] = sabnzbd.WIN32
header['darwin'] = sabnzbd.DARWIN
header['power_options'] = sabnzbd.WIN32 or sabnzbd.DARWIN or sabnzbd.LINUX_POWER
header['pp_pause_event'] = sabnzbd.scheduler.pp_pause_event()
header['session'] = cfg.api_key()
header['new_release'], header['new_rel_url'] = sabnzbd.NEW_VERSION
header['version'] = sabnzbd.__version__
header['paused'] = Downloader.do.paused or Downloader.do.postproc
header['pause_int'] = scheduler.pause_int()
header['paused_all'] = sabnzbd.PAUSED_ALL
header['diskspace1'] = "%.2f" % diskspace_info['download_dir'][1]
header['diskspace2'] = "%.2f" % diskspace_info['complete_dir'][1]
header['diskspace1_norm'] = to_units(diskspace_info['download_dir'][1] * GIGI)
header['diskspace2_norm'] = to_units(diskspace_info['complete_dir'][1] * GIGI)
header['diskspacetotal1'] = "%.2f" % diskspace_info['download_dir'][0]
header['diskspacetotal2'] = "%.2f" % diskspace_info['complete_dir'][0]
header['loadavg'] = loadavg()
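    # Editor's note (illustrative): the format below shows one decimal only for
    # fractional limits, e.g. a 66.6 percent limit renders as "66.6" while 100 renders as "100"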
header['speedlimit'] = "{1:0.{0}f}".format(int(speed_limit % 1 > 0), speed_limit)
header['speedlimit_abs'] = "%s" % speed_limit_abs
header['have_warnings'] = str(sabnzbd.GUIHANDLER.count())
header['finishaction'] = sabnzbd.QUEUECOMPLETE
header['quota'] = to_units(BPSMeter.do.quota)
header['have_quota'] = bool(BPSMeter.do.quota > 0.0)
header['left_quota'] = to_units(BPSMeter.do.left)
anfo = ArticleCache.do.cache_info()
header['cache_art'] = str(anfo.article_sum)
header['cache_size'] = format_bytes(anfo.cache_size)
header['cache_max'] = str(anfo.cache_limit)
return header
def build_queue_header(search=None, start=0, limit=0, output=None):
""" Build full queue header """
header = build_header(output=output)
bytespersec = BPSMeter.do.bps
qnfo = NzbQueue.do.queue_info(search=search, start=start, limit=limit)
bytesleft = qnfo.bytes_left
bytes = qnfo.bytes
header['kbpersec'] = "%.2f" % (bytespersec / KIBI)
header['speed'] = to_units(bytespersec)
header['mbleft'] = "%.2f" % (bytesleft / MEBI)
header['mb'] = "%.2f" % (bytes / MEBI)
header['sizeleft'] = format_bytes(bytesleft)
header['size'] = format_bytes(bytes)
header['noofslots_total'] = qnfo.q_fullsize
if Downloader.do.paused or Downloader.do.postproc:
status = Status.PAUSED
elif bytespersec > 0:
status = Status.DOWNLOADING
else:
status = 'Idle'
header['status'] = status
header['timeleft'] = calc_timeleft(bytesleft, bytespersec)
try:
datestart = datetime.datetime.now() + datetime.timedelta(seconds=bytesleft / bytespersec)
# new eta format: 16:00 Fri 07 Feb
header['eta'] = datestart.strftime(time_format('%H:%M %a %d %b'))
except:
header['eta'] = T('unknown')
return header, qnfo.list, bytespersec, qnfo.q_fullsize, qnfo.bytes_left_previous_page
def build_history(start=None, limit=None, search=None, failed_only=0, categories=None, output=None):
limit = int_conv(limit)
if not limit:
limit = 1000000
start = int_conv(start)
failed_only = int_conv(failed_only)
def matches_search(text, search_text):
        # Replace * and spaces with .* so simple wildcard searches work
search_text = search_text.strip().replace('*', '.*').replace(' ', '.*') + '.*?'
try:
re_search = re.compile(search_text, re.I)
except:
logging.error(T('Failed to compile regex for search term: %s'), search_text)
return False
return re_search.search(text)
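    # Editor's note (illustrative): a search of "big buck*1080" becomes the pattern
    # 'big.*buck.*1080.*?' and is matched case-insensitively, so it hits "Big.Buck.Bunny.1080p"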
# Grab any items that are active or queued in postproc
queue = PostProcessor.do.get_queue()
# Filter out any items that don't match the search
if search:
queue = [nzo for nzo in queue if matches_search(nzo.final_name, search)]
# Multi-page support for postproc items
full_queue_size = len(queue)
if start > full_queue_size:
# On a page where we shouldn't show postproc items
queue = []
h_limit = limit
else:
try:
if limit:
queue = queue[start:start + limit]
else:
queue = queue[start:]
except:
pass
# Remove the amount of postproc items from the db request for history items
h_limit = max(limit - len(queue), 0)
h_start = max(start - full_queue_size, 0)
    # Acquire the db instance
try:
history_db = sabnzbd.get_db_connection()
close_db = False
except:
# Required for repairs at startup because Cherrypy isn't active yet
history_db = HistoryDB()
close_db = True
# Fetch history items
if not h_limit:
items, fetched_items, total_items = history_db.fetch_history(h_start, 1, search, failed_only, categories)
items = []
else:
items, fetched_items, total_items = history_db.fetch_history(h_start, h_limit, search, failed_only, categories)
# Reverse the queue to add items to the top (faster than insert)
items.reverse()
# Add the postproc items to the top of the history
items = get_active_history(queue, items)
# Unreverse the queue
items.reverse()
for item in items:
item['size'] = format_bytes(item['bytes'])
if 'loaded' not in item:
item['loaded'] = False
path = item.get('path', '')
item['retry'] = int_conv(item.get('status') == Status.FAILED and path and os.path.exists(path))
# Retry of failed URL-fetch
if item['report'] == 'future':
item['retry'] = True
if Rating.do:
rating = Rating.do.get_rating_by_nzo(item['nzo_id'])
else:
rating = None
item['has_rating'] = rating is not None
if rating:
item['rating_avg_video'] = rating.avg_video
item['rating_avg_audio'] = rating.avg_audio
item['rating_avg_vote_up'] = rating.avg_vote_up
item['rating_avg_vote_down'] = rating.avg_vote_down
item['rating_user_video'] = rating.user_video
item['rating_user_audio'] = rating.user_audio
item['rating_user_vote'] = rating.user_vote
total_items += full_queue_size
fetched_items = len(items)
if close_db:
history_db.close()
return items, fetched_items, total_items
def get_active_history(queue=None, items=None):
""" Get the currently in progress and active history queue. """
if items is None:
items = []
if queue is None:
queue = PostProcessor.do.get_queue()
for nzo in queue:
history = build_history_info(nzo)
item = {}
item['completed'], item['name'], item['nzb_name'], item['category'], item['pp'], item['script'], item['report'], \
item['url'], item['status'], item['nzo_id'], item['storage'], item['path'], item['script_log'], \
item['script_line'], item['download_time'], item['postproc_time'], item['stage_log'], \
item['downloaded'], item['completeness'], item['fail_message'], item['url_info'], item['bytes'], \
_, _, item['password'] = history
item['action_line'] = nzo.action_line
item = unpack_history_info(item)
item['loaded'] = nzo.pp_active
if item['bytes']:
item['size'] = format_bytes(item['bytes'])
else:
item['size'] = ''
items.append(item)
return items
def format_bytes(bytes_string):
b = to_units(bytes_string)
if b == '':
return b
else:
return b + 'B'
def calc_timeleft(bytesleft, bps):
""" Calculate the time left in the format HH:MM:SS """
try:
if bytesleft <= 0:
return '0:00:00'
totalseconds = int(bytesleft / bps)
minutes, seconds = divmod(totalseconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
if minutes < 10:
minutes = '0%s' % minutes
if seconds < 10:
seconds = '0%s' % seconds
if days > 0:
if hours < 10:
hours = '0%s' % hours
return '%s:%s:%s:%s' % (days, hours, minutes, seconds)
else:
return '%s:%s:%s' % (hours, minutes, seconds)
except:
return '0:00:00'
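# Editor's note: illustrative values (assumptions) for calc_timeleft(); MEBI is the
# 2**20 constant already used throughout this module.
def _example_calc_timeleft():
    assert calc_timeleft(1024 * MEBI, 2 * MEBI) == '0:08:32'   # 1 GiB left at 2 MiB/s
    assert calc_timeleft(100000 * MEBI, MEBI) == '1:03:46:40'  # more than a day adds a days field
    assert calc_timeleft(MEBI, 0) == '0:00:00'                 # a zero rate falls back safely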
def list_scripts(default=False, none=True):
""" Return a list of script names, optionally with 'Default' added """
lst = []
path = cfg.script_dir.get_path()
if path and os.access(path, os.R_OK):
for script in globber_full(path):
if os.path.isfile(script):
if (sabnzbd.WIN32 and os.path.splitext(script)[1].lower() in PATHEXT and
not win32api.GetFileAttributes(script) & win32file.FILE_ATTRIBUTE_HIDDEN) or \
script.endswith('.py') or \
(not sabnzbd.WIN32 and userxbit(script) and not os.path.basename(script).startswith('.')):
lst.append(os.path.basename(script))
if none:
lst.insert(0, 'None')
if default:
lst.insert(0, 'Default')
return lst
def list_cats(default=True):
""" Return list of (ordered) categories,
when default==False use '*' for Default category
"""
lst = [cat['name'] for cat in config.get_ordered_categories()]
if default:
lst.remove('*')
lst.insert(0, 'Default')
return lst
_PLURAL_TO_SINGLE = {
'categories': 'category',
'servers': 'server',
'rss': 'feed',
'scripts': 'script',
'warnings': 'warning',
'files': 'file',
'jobs': 'job'
}
def plural_to_single(kw, def_kw=''):
try:
return _PLURAL_TO_SINGLE[kw]
except KeyError:
return def_kw
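# Editor's note: illustrative lookups (assumptions) for plural_to_single(), which
# xml_factory uses above to pick element names for list entries.
def _example_plural_to_single():
    assert plural_to_single('servers') == 'server'
    assert plural_to_single('cat_list', 'item') == 'item'  # unknown keywords fall back to the default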
def del_from_section(kwargs):
""" Remove keyword in section """
section = kwargs.get('section', '')
if section in ('servers', 'rss', 'categories'):
keyword = kwargs.get('keyword')
if keyword:
item = config.get_config(section, keyword)
if item:
item.delete()
del item
config.save_config()
if section == 'servers':
Downloader.do.update_server(keyword, None)
return True
else:
return False
def history_remove_failed():
""" Remove all failed jobs from history, including files """
logging.info('Scheduled removal of all failed jobs')
history_db = HistoryDB()
del_job_files(history_db.get_failed_paths())
history_db.remove_failed()
history_db.close()
def history_remove_completed():
""" Remove all completed jobs from history """
logging.info('Scheduled removal of all completed jobs')
history_db = HistoryDB()
history_db.remove_completed()
history_db.close()
|
mpc.py
|
import os.path
import re
import time
import threading
import _thread
from functools import wraps
import win32con, win32api, win32gui, ctypes, ctypes.wintypes #@UnresolvedImport @UnusedImport
from syncplay import constants
from syncplay.messages import getMessage
from syncplay.players.basePlayer import BasePlayer
from syncplay.utils import retry
class MpcHcApi:
def __init__(self):
self.callbacks = self.__Callbacks()
self.loadState = None
self.playState = None
self.filePlaying = None
self.fileDuration = None
self.filePath = None
self.lastFilePosition = None
self.version = None
self.__playpause_warden = False
self.__locks = self.__Locks()
self.__mpcExistenceChecking = threading.Thread(target=self.__mpcReadyInSlaveMode, name="Check MPC window")
self.__mpcExistenceChecking.setDaemon(True)
self.__listener = self.__Listener(self, self.__locks)
self.__listener.setDaemon(True)
self.__listener.start()
self.__locks.listenerStart.wait()
def waitForFileStateReady(f): #@NoSelf
@wraps(f)
def wrapper(self, *args, **kwds):
if not self.__locks.fileReady.wait(constants.MPC_LOCK_WAIT_TIME):
raise self.PlayerNotReadyException()
return f(self, *args, **kwds)
return wrapper
def startMpc(self, path, args=()):
args = "%s /slave %s" % (" ".join(args), str(self.__listener.hwnd))
win32api.ShellExecute(0, "open", path, args, None, 1)
if not self.__locks.mpcStart.wait(constants.MPC_OPEN_MAX_WAIT_TIME):
raise self.NoSlaveDetectedException(getMessage("mpc-slave-error"))
self.__mpcExistenceChecking.start()
def openFile(self, filePath):
self.__listener.SendCommand(self.CMD_OPENFILE, filePath)
def isPaused(self):
return self.playState != self.__MPC_PLAYSTATE.PS_PLAY and self.playState is not None
def askForVersion(self):
self.__listener.SendCommand(self.CMD_GETVERSION)
@waitForFileStateReady
def pause(self):
self.__listener.SendCommand(self.CMD_PAUSE)
@waitForFileStateReady
def playPause(self):
self.__listener.SendCommand(self.CMD_PLAYPAUSE)
@waitForFileStateReady
def unpause(self):
self.__listener.SendCommand(self.CMD_PLAY)
@waitForFileStateReady
def askForCurrentPosition(self):
self.__listener.SendCommand(self.CMD_GETCURRENTPOSITION)
@waitForFileStateReady
def seek(self, position):
self.__listener.SendCommand(self.CMD_SETPOSITION, str(position))
@waitForFileStateReady
def setSpeed(self, rate):
self.__listener.SendCommand(self.CMD_SETSPEED, str(rate))
def sendOsd(self, message, MsgPos=constants.MPC_OSD_POSITION, DurationMs=(constants.OSD_DURATION*1000)):
class __OSDDATASTRUCT(ctypes.Structure):
_fields_ = [
('nMsgPos', ctypes.c_int32),
('nDurationMS', ctypes.c_int32),
('strMsg', ctypes.c_wchar * (len(message.encode('utf-8')) + 1))
]
cmessage = __OSDDATASTRUCT()
cmessage.nMsgPos = MsgPos
cmessage.nDurationMS = DurationMs
cmessage.strMsg = message
self.__listener.SendCommand(self.CMD_OSDSHOWMESSAGE, cmessage)
def sendRawCommand(self, cmd, value):
self.__listener.SendCommand(cmd, value)
def handleCommand(self, cmd, value):
if cmd == self.CMD_CONNECT:
self.__listener.mpcHandle = int(value)
self.__locks.mpcStart.set()
if self.callbacks.onConnected:
_thread.start_new_thread(self.callbacks.onConnected, ())
elif cmd == self.CMD_STATE:
self.loadState = int(value)
fileNotReady = (
self.loadState == self.__MPC_LOADSTATE.MLS_CLOSING or
self.loadState == self.__MPC_LOADSTATE.MLS_LOADING or
self.loadState == self.__MPC_LOADSTATE.MLS_CLOSED
)
if fileNotReady:
self.playState = None
self.__locks.fileReady.clear()
else:
self.__locks.fileReady.set()
if self.callbacks.onFileStateChange:
_thread.start_new_thread(self.callbacks.onFileStateChange, (self.loadState,))
elif cmd == self.CMD_PLAYMODE:
self.playState = int(value)
if self.callbacks.onUpdatePlaystate:
_thread.start_new_thread(self.callbacks.onUpdatePlaystate, (self.playState,))
elif cmd == self.CMD_NOWPLAYING:
value = re.split(r'(?<!\\)\|', value)
if self.filePath == value[3]:
return
self.filePath = value[3]
self.filePlaying = value[3].split('\\').pop()
self.fileDuration = float(value[4])
if self.callbacks.onUpdatePath:
                _thread.start_new_thread(self.callbacks.onUpdatePath, (self.filePath,))  # pass the updated path to the callback
if self.callbacks.onUpdateFilename:
_thread.start_new_thread(self.callbacks.onUpdateFilename, (self.filePlaying,))
if self.callbacks.onUpdateFileDuration:
_thread.start_new_thread(self.callbacks.onUpdateFileDuration, (self.fileDuration,))
elif cmd == self.CMD_CURRENTPOSITION:
self.lastFilePosition = float(value)
if self.callbacks.onGetCurrentPosition:
_thread.start_new_thread(self.callbacks.onGetCurrentPosition, (self.lastFilePosition,))
elif cmd == self.CMD_NOTIFYSEEK:
if self.lastFilePosition != float(value): # Notify seek is sometimes sent twice
self.lastFilePosition = float(value)
if self.callbacks.onSeek:
_thread.start_new_thread(self.callbacks.onSeek, (self.lastFilePosition,))
elif cmd == self.CMD_DISCONNECT:
if self.callbacks.onMpcClosed:
_thread.start_new_thread(self.callbacks.onMpcClosed, (None,))
elif cmd == self.CMD_VERSION:
if self.callbacks.onVersion:
self.version = value
_thread.start_new_thread(self.callbacks.onVersion, (value,))
class PlayerNotReadyException(Exception):
pass
class __Callbacks:
def __init__(self):
self.onConnected = None
self.onSeek = None
self.onUpdatePath = None
self.onUpdateFilename = None
self.onUpdateFileDuration = None
self.onGetCurrentPosition = None
self.onUpdatePlaystate = None
self.onFileStateChange = None
self.onMpcClosed = None
self.onVersion = None
class __Locks:
def __init__(self):
self.listenerStart = threading.Event()
self.mpcStart = threading.Event()
self.fileReady = threading.Event()
def __mpcReadyInSlaveMode(self):
while True:
time.sleep(10)
if not win32gui.IsWindow(self.__listener.mpcHandle):
if self.callbacks.onMpcClosed:
self.callbacks.onMpcClosed(None)
break
CMD_CONNECT = 0x50000000
CMD_STATE = 0x50000001
CMD_PLAYMODE = 0x50000002
CMD_NOWPLAYING = 0x50000003
CMD_LISTSUBTITLETRACKS = 0x50000004
CMD_LISTAUDIOTRACKS = 0x50000005
CMD_CURRENTPOSITION = 0x50000007
CMD_NOTIFYSEEK = 0x50000008
CMD_NOTIFYENDOFSTREAM = 0x50000009
CMD_PLAYLIST = 0x50000006
CMD_OPENFILE = 0xA0000000
CMD_STOP = 0xA0000001
CMD_CLOSEFILE = 0xA0000002
CMD_PLAYPAUSE = 0xA0000003
CMD_ADDTOPLAYLIST = 0xA0001000
CMD_CLEARPLAYLIST = 0xA0001001
CMD_STARTPLAYLIST = 0xA0001002
CMD_REMOVEFROMPLAYLIST = 0xA0001003 # TODO
CMD_SETPOSITION = 0xA0002000
CMD_SETAUDIODELAY = 0xA0002001
CMD_SETSUBTITLEDELAY = 0xA0002002
    CMD_SETINDEXPLAYLIST = 0xA0002003 # DOESN'T WORK
CMD_SETAUDIOTRACK = 0xA0002004
CMD_SETSUBTITLETRACK = 0xA0002005
CMD_GETSUBTITLETRACKS = 0xA0003000
CMD_GETCURRENTPOSITION = 0xA0003004
CMD_JUMPOFNSECONDS = 0xA0003005
CMD_GETAUDIOTRACKS = 0xA0003001
CMD_GETNOWPLAYING = 0xA0003002
CMD_GETPLAYLIST = 0xA0003003
CMD_TOGGLEFULLSCREEN = 0xA0004000
CMD_JUMPFORWARDMED = 0xA0004001
CMD_JUMPBACKWARDMED = 0xA0004002
CMD_INCREASEVOLUME = 0xA0004003
CMD_DECREASEVOLUME = 0xA0004004
CMD_SHADER_TOGGLE = 0xA0004005
CMD_CLOSEAPP = 0xA0004006
CMD_OSDSHOWMESSAGE = 0xA0005000
CMD_VERSION = 0x5000000A
CMD_DISCONNECT = 0x5000000B
CMD_PLAY = 0xA0000004
CMD_PAUSE = 0xA0000005
CMD_GETVERSION = 0xA0003006
CMD_SETSPEED = 0xA0004008
class __MPC_LOADSTATE:
MLS_CLOSED = 0
MLS_LOADING = 1
MLS_LOADED = 2
MLS_CLOSING = 3
class __MPC_PLAYSTATE:
PS_PLAY = 0
PS_PAUSE = 1
PS_STOP = 2
PS_UNUSED = 3
class __Listener(threading.Thread):
def __init__(self, mpcApi, locks):
self.__mpcApi = mpcApi
self.locks = locks
self.mpcHandle = None
self.hwnd = None
self.__PCOPYDATASTRUCT = ctypes.POINTER(self.__COPYDATASTRUCT)
threading.Thread.__init__(self, name="MPC Listener")
def run(self):
message_map = {
win32con.WM_COPYDATA: self.OnCopyData
}
wc = win32gui.WNDCLASS()
wc.lpfnWndProc = message_map
wc.lpszClassName = 'MPCApiListener'
hinst = wc.hInstance = win32api.GetModuleHandle(None)
classAtom = win32gui.RegisterClass(wc)
self.hwnd = win32gui.CreateWindow(
classAtom,
"ListenerGUI",
0,
0,
0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0,
0,
hinst,
None
)
self.locks.listenerStart.set()
win32gui.PumpMessages()
def OnCopyData(self, hwnd, msg, wparam, lparam):
pCDS = ctypes.cast(lparam, self.__PCOPYDATASTRUCT)
# print "API:\tin>\t 0x%X\t" % int(pCDS.contents.dwData), ctypes.wstring_at(pCDS.contents.lpData)
self.__mpcApi.handleCommand(pCDS.contents.dwData, ctypes.wstring_at(pCDS.contents.lpData))
def SendCommand(self, cmd, message=''):
# print "API:\t<out\t 0x%X\t" % int(cmd), message
if not win32gui.IsWindow(self.mpcHandle):
if self.__mpcApi.callbacks.onMpcClosed:
self.__mpcApi.callbacks.onMpcClosed(None)
cs = self.__COPYDATASTRUCT()
            cs.dwData = cmd
if isinstance(message, str):
message = ctypes.create_unicode_buffer(message, len(message) + 1)
elif isinstance(message, ctypes.Structure):
pass
else:
raise TypeError
cs.lpData = ctypes.addressof(message)
cs.cbData = ctypes.sizeof(message)
ptr = ctypes.addressof(cs)
win32api.SendMessage(self.mpcHandle, win32con.WM_COPYDATA, self.hwnd, ptr)
class __COPYDATASTRUCT(ctypes.Structure):
_fields_ = [
('dwData', ctypes.wintypes.LPARAM),
('cbData', ctypes.wintypes.DWORD),
('lpData', ctypes.c_void_p)
]
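# Editor's note: a standalone sketch (not part of Syncplay) of the "wait until ready"
# decorator pattern used by MpcHcApi.waitForFileStateReady: block on a threading.Event
# with a timeout and fail fast when no file is loaded. All names below are illustrative.
def _make_wait_for_event(event, timeout, exception_class):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            if not event.wait(timeout):
                raise exception_class()
            return f(*args, **kwargs)
        return wrapper
    return decorator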
class MPCHCAPIPlayer(BasePlayer):
speedSupported = False
alertOSDSupported = False
customOpenDialog = False
chatOSDSupported = False
osdMessageSeparator = "; "
def __init__(self, client):
from twisted.internet import reactor
self.reactor = reactor
self.__client = client
self._mpcApi = MpcHcApi()
self._mpcApi.callbacks.onUpdateFilename = lambda _: self.__makePing()
self._mpcApi.callbacks.onMpcClosed = lambda _: self.reactor.callFromThread(self.__client.stop, False,)
self._mpcApi.callbacks.onFileStateChange = lambda _: self.__lockAsking()
self._mpcApi.callbacks.onUpdatePlaystate = lambda _: self.__unlockAsking()
self._mpcApi.callbacks.onGetCurrentPosition = lambda _: self.__onGetPosition()
self._mpcApi.callbacks.onVersion = lambda _: self.__versionUpdate.set()
self.__switchPauseCalls = False
self.__preventAsking = threading.Event()
self.__positionUpdate = threading.Event()
self.__versionUpdate = threading.Event()
self.__fileUpdate = threading.RLock()
self.__versionUpdate.clear()
@staticmethod
def getMinVersionErrorMessage():
return getMessage("mpc-version-insufficient-error").format(constants.MPC_MIN_VER)
def drop(self):
self.__preventAsking.set()
self.__positionUpdate.set()
self.__versionUpdate.set()
self._mpcApi.sendRawCommand(MpcHcApi.CMD_CLOSEAPP, "")
@staticmethod
def getPlayerPathErrors(playerPath, filePath):
return None
@staticmethod
def run(client, playerPath, filePath, args):
args.extend(['/open', '/new'])
mpc = MPCHCAPIPlayer(client)
mpc._mpcApi.callbacks.onConnected = lambda: mpc.initPlayer(filePath if filePath else None)
mpc._mpcApi.startMpc(MPCHCAPIPlayer.getExpandedPath(playerPath), args)
client.initPlayer(mpc)
return mpc
def __lockAsking(self):
self.__preventAsking.clear()
def __unlockAsking(self):
self.__preventAsking.set()
def __onGetPosition(self):
self.__positionUpdate.set()
def setSpeed(self, value):
try:
self._mpcApi.setSpeed(value)
except MpcHcApi.PlayerNotReadyException:
self.setSpeed(value)
def __dropIfNotSufficientVersion(self):
self._mpcApi.askForVersion()
if not self.__versionUpdate.wait(0.1) or not self._mpcApi.version:
self.reactor.callFromThread(self.__client.ui.showErrorMessage, self.getMinVersionErrorMessage(), True)
self.reactor.callFromThread(self.__client.stop, True)
def __testMpcReady(self):
if not self.__preventAsking.wait(10):
raise Exception(getMessage("player-file-open-error"))
def __makePing(self):
try:
self.__testMpcReady()
self._mpcApi.callbacks.onUpdateFilename = lambda _: self.__handleUpdatedFilename()
self.__handleUpdatedFilename()
self.askForStatus()
except Exception as err:
            self.reactor.callFromThread(self.__client.ui.showErrorMessage, str(err), True)
self.reactor.callFromThread(self.__client.stop)
def initPlayer(self, filePath):
self.__dropIfNotSufficientVersion()
if not self._mpcApi.version:
return
self.__mpcVersion = self._mpcApi.version.split('.')
if self.__mpcVersion[0:3] == ['1', '6', '4']:
self.__switchPauseCalls = True
if filePath:
self.openFile(filePath)
def openFile(self, filePath, resetPosition=False):
self._mpcApi.openFile(filePath)
if resetPosition:
self.setPosition(0)
def displayMessage(
self, message,
duration=(constants.OSD_DURATION*1000), OSDType=constants.OSD_NOTIFICATION, mood=constants.MESSAGE_NEUTRAL
):
self._mpcApi.sendOsd(message, constants.MPC_OSD_POSITION, duration)
@retry(MpcHcApi.PlayerNotReadyException, constants.MPC_MAX_RETRIES, constants.MPC_RETRY_WAIT_TIME, 1)
def setPaused(self, value):
if self._mpcApi.filePlaying:
if self.__switchPauseCalls:
value = not value
if value:
self._mpcApi.pause()
else:
self._mpcApi.unpause()
def setFeatures(self, featureList):
pass
@retry(MpcHcApi.PlayerNotReadyException, constants.MPC_MAX_RETRIES, constants.MPC_RETRY_WAIT_TIME, 1)
def setPosition(self, value):
if self._mpcApi.filePlaying:
self._mpcApi.seek(value)
def __getPosition(self):
self.__positionUpdate.clear()
self._mpcApi.askForCurrentPosition()
self.__positionUpdate.wait(constants.MPC_LOCK_WAIT_TIME)
return self._mpcApi.lastFilePosition
def askForStatus(self):
try:
if self._mpcApi.filePlaying and self.__preventAsking.wait(0) and self.__fileUpdate.acquire(0):
self.__fileUpdate.release()
position = self.__getPosition()
paused = self._mpcApi.isPaused()
position = float(position)
if self.__preventAsking.wait(0) and self.__fileUpdate.acquire(0):
self.__client.updatePlayerStatus(paused, position)
self.__fileUpdate.release()
else:
self.__echoGlobalStatus()
except MpcHcApi.PlayerNotReadyException:
self.__echoGlobalStatus()
def __echoGlobalStatus(self):
self.__client.updatePlayerStatus(self.__client.getGlobalPaused(), self.__client.getGlobalPosition())
def __forcePause(self):
for _ in range(constants.MPC_MAX_RETRIES):
self.setPaused(True)
time.sleep(constants.MPC_RETRY_WAIT_TIME)
def __refreshMpcPlayState(self):
for _ in range(2):
self._mpcApi.playPause()
time.sleep(constants.MPC_PAUSE_TOGGLE_DELAY)
def _setPausedAccordinglyToServer(self):
self.__forcePause()
self.setPaused(self.__client.getGlobalPaused())
if self._mpcApi.isPaused() != self.__client.getGlobalPaused():
self.__refreshMpcPlayState()
if self._mpcApi.isPaused() != self.__client.getGlobalPaused():
self.__setUpStateForNewlyOpenedFile()
@retry(MpcHcApi.PlayerNotReadyException, constants.MPC_MAX_RETRIES, constants.MPC_RETRY_WAIT_TIME, 1)
def __setUpStateForNewlyOpenedFile(self):
self._setPausedAccordinglyToServer()
self._mpcApi.seek(self.__client.getGlobalPosition())
def __handleUpdatedFilename(self):
with self.__fileUpdate:
self.__setUpStateForNewlyOpenedFile()
args = (self._mpcApi.filePlaying, self._mpcApi.fileDuration, self._mpcApi.filePath)
self.reactor.callFromThread(self.__client.updateFile, *args)
def sendCustomCommand(self, cmd, val):
self._mpcApi.sendRawCommand(cmd, val)
@staticmethod
def getDefaultPlayerPathsList():
return constants.MPC_PATHS
@staticmethod
def getIconPath(path):
if (
MPCHCAPIPlayer.getExpandedPath(path).lower().endswith('mpc-hc64.exe'.lower()) or
MPCHCAPIPlayer.getExpandedPath(path).lower().endswith('mpc-hc64_nvo.exe'.lower())
):
return constants.MPC64_ICONPATH
else:
return constants.MPC_ICONPATH
@staticmethod
def isValidPlayerPath(path):
if MPCHCAPIPlayer.getExpandedPath(path):
return True
return False
@staticmethod
def getExpandedPath(path):
if os.path.isfile(path):
if (
path.lower().endswith('mpc-hc.exe'.lower()) or path.lower().endswith('mpc-hcportable.exe'.lower()) or
path.lower().endswith('mpc-hc64.exe'.lower()) or path.lower().endswith('mpc-hc64_nvo.exe'.lower()) or
path.lower().endswith('mpc-hc_nvo.exe'.lower())
):
return path
if os.path.isfile(path + "mpc-hc.exe"):
path += "mpc-hc.exe"
return path
if os.path.isfile(path + "\\mpc-hc.exe"):
path += "\\mpc-hc.exe"
return path
if os.path.isfile(path + "mpc-hcportable.exe"):
path += "mpc-hcportable.exe"
return path
if os.path.isfile(path + "\\mpc-hcportable.exe"):
path += "\\mpc-hcportable.exe"
return path
if os.path.isfile(path + "mpc-hc_nvo.exe"):
path += "mpc-hc_nvo.exe"
return path
if os.path.isfile(path + "\\mpc-hc_nvo.exe"):
path += "\\mpc-hc_nvo.exe"
return path
if os.path.isfile(path + "mpc-hc64.exe"):
path += "mpc-hc64.exe"
return path
if os.path.isfile(path + "\\mpc-hc64.exe"):
path += "\\mpc-hc64.exe"
return path
if os.path.isfile(path + "mpc-hc64_nvo.exe"):
path += "mpc-hc64_nvo.exe"
return path
if os.path.isfile(path + "\\mpc-hc64_nvo.exe"):
path += "\\mpc-hc64_nvo.exe"
return path
|
test_dispatcher.py
|
from __future__ import print_function, division, absolute_import
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
except ImportError:
pygments = None
import numpy as np
from numba import unittest_support as unittest
from numba import utils, jit, generated_jit, types, typeof, errors
from numba import _dispatcher
from numba.compiler import compile_isolated
from numba.errors import NumbaWarning
from .support import (TestCase, tag, temp_directory, import_dynamic,
override_env_config, capture_cache_log, captured_stdout)
from numba.numpy_support import as_dtype
from numba.targets import codegen
from numba.caching import _UserWideCacheLocator
from numba.dispatcher import Dispatcher
from numba import parfor
from .test_linalg import needs_lapack
from .support import skip_parfors_unsupported
import llvmlite.binding as ll
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
def dtype_generated_usecase(a, b, dtype=None):
if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
for ary in (a, b)))
elif isinstance(dtype, (types.DType, types.NumberClass)):
out_dtype = as_dtype(dtype)
else:
raise TypeError("Unhandled Type %s" % type(dtype))
def _fn(a, b, dtype=None):
return np.ones(a.shape, dtype=out_dtype)
return _fn
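# Editor's note: an illustrative sketch (not part of the test suite) of how
# generated_usecase is meant to be consumed: generated_jit selects an implementation
# from the argument types at compile time. The wrapper name is an assumption.
def _example_generated_jit_usage():
    gen_add_or_sub = generated_jit(nopython=True)(generated_usecase)
    assert gen_add_or_sub(1 + 0j, 2) == (1 + 0j) + 2  # complex input -> x + y
    assert gen_add_or_sub(10, 2) == 10 - 2            # non-complex input -> x - y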
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per
# https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
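# Editor's note (illustrative): skip_bad_access is meant to decorate tests that rely
# on chmod-based access prevention, e.g.
#   @skip_bad_access
#   def test_cannot_write_to_protected_cache_dir(self): ...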
class TestDispatcher(BaseTest):
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
        # __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> "
r"\(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = ("Signature mismatch: %d argument types given, but function "
"takes 2 arguments")
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the failure with fingerprint.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
def test_serialization(self):
"""
Test serialization of Dispatcher objects
"""
@jit(nopython=True)
def foo(x):
return x + 1
self.assertEqual(foo(1), 2)
# get serialization memo
memo = Dispatcher._memo
Dispatcher._recent.clear()
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
self.assertEqual(memo_size + 1, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size + 1, len(memo))
self.assertIs(foo, foo_rebuilt)
        # do we get the same object even if we delete all the explicit
        # references?
id_orig = id(foo_rebuilt)
del foo
del foo_rebuilt
self.assertEqual(memo_size + 1, len(memo))
new_foo = pickle.loads(serialized_foo)
self.assertEqual(id_orig, id(new_foo))
# now clear the recent cache
ref = weakref.ref(new_foo)
del new_foo
Dispatcher._recent.clear()
self.assertEqual(memo_size, len(memo))
# show that deserializing creates a new object
pickle.loads(serialized_foo)
self.assertIs(ref(), None)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_array_dispatch(self):
# for context see issue #2937
def foo(a):
return np.linalg.matrix_power(a, 1)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# The checks must be run in this order to create the dispatch key
# sequence that causes invalid dispatch noted in #2937.
# The first two should hit the cache as they are aligned, supported
# order and under 5 dimensions. The second two should end up in the
# fallback path as they are misaligned.
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
check("F_contig_misaligned", F_contig_misaligned)
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_immutability_in_array_dispatch(self):
# RO operation in function
def foo(a):
return np.sum(a)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a, disable_write_bit=False):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
if disable_write_bit:
a.flags.writeable = False
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# all of these should end up in the fallback path as they have no write
# bit set
check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
check("C_contig_misaligned", C_contig_misaligned,
disable_write_bit=True)
check("F_contig_misaligned", F_contig_misaligned,
disable_write_bit=True)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_high_dimension_array_dispatch(self):
def foo(a):
return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)
jitfoo = jit(nopython=True)(foo)
def check_properties(arr, layout, aligned):
self.assertEqual(arr.flags.aligned, aligned)
if layout == "C":
self.assertEqual(arr.flags.c_contiguous, True)
if layout == "F":
self.assertEqual(arr.flags.f_contiguous, True)
n = 729
r = 3
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_aligned, 'C', True)
C_contig_misaligned = tmp[1:].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_misaligned, 'C', False)
F_contig_aligned = C_contig_aligned.T
check_properties(F_contig_aligned, 'F', True)
F_contig_misaligned = C_contig_misaligned.T
check_properties(F_contig_misaligned, 'F', False)
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).\
reshape(r, r, r, r, r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# these should all hit the fallback path as the cache is only for up to
# 5 dimensions
check("F_contig_misaligned", F_contig_misaligned)
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
def test_dispatch_recompiles_for_scalars(self):
# For context see issue #3612. Essentially, compiling a lambda x: x for a
# numerically wide type (everything can be converted to a complex128)
# and then calling again with e.g. an int32 would lead to the int32
# being converted to a complex128, whereas a new int32 specialization
# ought to be compiled instead.
def foo(x):
return x
# jit and compile on dispatch for 3 scalar types, expect 3 signatures
jitfoo = jit(nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 3)
expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
# now jit with signatures so recompilation is forbidden
# expect 1 signature and type conversion
jitfoo = jit([(types.complex128,)], nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 1)
expected_sigs = [(types.complex128,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
@tag('important')
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
@tag('important')
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
@tag('important')
def test_generated_dtype(self):
f = generated_jit(nopython=True)(dtype_generated_usecase)
a = np.ones((10,), dtype=np.float32)
b = np.ones((10,), dtype=np.float64)
self.assertEqual(f(a, b).dtype, np.float64)
self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)
def test_signature_errors(self):
"""
Check error reporting when the implementation signature doesn't match
the generating function's signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x, y=6)'",
str(raises.exception))
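# A minimal sketch of the @generated_jit mechanism exercised above, with a
# toy function of our own (not the generated_usecase used by the tests):
# the decorated function is called at compile time with numba *types* and
# returns the implementation to compile for those types.
from numba import generated_jit, types
import numpy as np

@generated_jit(nopython=True)
def _zero_of(x):
    if isinstance(x, types.Complex):
        return lambda x: 0j          # complex inputs get a complex zero
    else:
        return lambda x: 0           # everything else gets an integer zero

assert _zero_of(np.int32(7)) == 0
assert _zero_of(1.5j) == 0j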
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
@tag('important')
def test_inspect_llvm(self):
# Create a jitted function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jitted function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all asm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the asm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper,
module_len,
module_name)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
# Test output
expected = str(foo.overloads[foo.signatures[0]].type_annotation)
with captured_stdout() as out:
foo.inspect_types()
assert expected in out.getvalue()
def test_inspect_types_with_signature(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.0)
# Inspect all signatures
with captured_stdout() as total:
foo.inspect_types()
# Inspect first signature
with captured_stdout() as first:
foo.inspect_types(signature=foo.signatures[0])
# Inspect second signature
with captured_stdout() as second:
foo.inspect_types(signature=foo.signatures[1])
self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
@unittest.skipIf(pygments is None, "please install the 'pygments' package")
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=utils.StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_get_annotation_info(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.3)
expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
for i in foo.signatures))
result = foo.get_annotation_info()
self.assertEqual(expected, result)
def test_issue_with_array_layout_conflict(self):
"""
This tests an issue with the dispatcher when an array that is both
C and F contiguous is supplied as the first signature.
The dispatcher checks for F contiguous first but the compiler checks
for C contiguous first. This results in C-contiguous code being
inserted as the F-contiguous function.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
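# A compact sketch (separate from the test machinery below, and assuming the
# decorated function lives in an importable source file) of the on-disk
# caching behaviour these tests exercise: with cache=True the dispatcher
# writes one index file plus one data file per compiled signature into the
# module's __pycache__, and a fresh process loads those instead of
# recompiling.
from numba import njit

@njit(cache=True)
def _cached_add(x, y):
    return x + y

_cached_add(1, 2)      # first call: compile, then write index + data
_cached_add(1.0, 2.0)  # new signature: a second data file is written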
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: "
"stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
class TestCache(BaseCacheUsecasesTest):
@tag('important')
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
@tag('important')
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "looplifted" '
'as it uses lifted loops', str(w[0].message))
def test_big_array(self):
# Code referencing big array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn(
'Cannot cache compiled function "{}"'.format(f.__name__),
str(w[0].message),
)
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
# Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
# returns None since it cannot locate the module;
# only works when the executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when the executable
# is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
def setUp(self):
super(TestSequentialParForsCache, self).setUp()
# Turn on sequential parfor lowering
parfor.sequential_parfor_lowering = True
def tearDown(self):
super(TestSequentialParForsCache, self).tearDown()
# Turn off sequential parfor lowering
parfor.sequential_parfor_lowering = False
def test_caching(self):
mod = self.import_module()
self.check_pycache(0)
f = mod.parfor_usecase
ary = np.ones(10)
self.assertPreciseEqual(f(ary), ary * ary + ary)
dynamic_globals = [cres.library.has_dynamic_globals
for cres in f.overloads.values()]
self.assertEqual(dynamic_globals, [False])
self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
# Make sure we can spawn a new process without inheriting
# the parent context.
mp = multiprocessing.get_context('spawn')
except ValueError:
# Without the 'spawn' start method the rest of this test cannot run.
self.skipTest("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_caching_multiple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
# Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_cannot_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
with self.assertRaises(TypeError) as raises:
foo(fn)
self.assertRegexpMatches(str(raises.exception),
"cannot convert native .* to Python object")
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
# The module type has no boxing logic, so converting the return
# value back to a Python object always raises.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
"""Test that failed-to-compile signatures are not recompiled.
"""
def run_test(self, func):
fcom = func._compiler
self.assertEqual(len(fcom._failed_cache), 0)
# expected failure because `int` has no `__getitem__`
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry with double
with self.assertRaises(errors.TypingError):
func(1.0)
self.assertEqual(len(fcom._failed_cache), 2)
def test_direct_call(self):
@jit(nopython=True)
def foo(x):
return x[0]
self.run_test(foo)
def test_nested_call(self):
@jit(nopython=True)
def bar(x):
return x[0]
@jit(nopython=True)
def foobar(x):
bar(x)
@jit(nopython=True)
def foo(x):
return bar(x) + foobar(x)
self.run_test(foo)
def test_error_count(self):
def check(field, would_fail):
# Slightly modified from the reproducer in issue #4117.
# Before the patch, the compilation time of the failing case was
# much longer than that of the successful case. This can be detected
# by the number of times `trigger()` is visited.
k = 10
counter = {'c': 0}
@generated_jit
def trigger(x):
# Keep track of every visit
counter['c'] += 1
if would_fail:
raise errors.TypingError("invoke_failed")
return lambda x: x
@jit(nopython=True)
def ident(out, x):
pass
def chain_assign(fs, inner=ident):
tab_head, tab_tail = fs[-1], fs[:-1]
@jit(nopython=True)
def assign(out, x):
inner(out, x)
out[0] += tab_head(x)
if tab_tail:
return chain_assign(tab_tail, assign)
else:
return assign
chain = chain_assign((trigger,) * k)
out = np.ones(2)
if would_fail:
with self.assertRaises(errors.TypingError) as raises:
chain(out, 1)
self.assertIn('invoke_failed', str(raises.exception))
else:
chain(out, 1)
# Returns the visit counts
return counter['c']
ct_ok = check('a', False)
ct_bad = check('c', True)
# `trigger()` is visited exactly once for both successful and failed
# compilation.
self.assertEqual(ct_ok, 1)
self.assertEqual(ct_bad, 1)
if __name__ == '__main__':
unittest.main()
|
primitives.py
|
"""Test the protocol.stacks module."""
# Builtins
import threading
import time
# Packages
from phyllo.io.threading.primitives import CancellableSemaphore
def test_semaphore():
"""Test CancellableSemaphore."""
semaphore = CancellableSemaphore()
def run_consumer():
while semaphore.acquire():
print('Consumer acquired a resource!')
print('Consumer quitting!')
consumer_thread = threading.Thread(target=run_consumer)
consumer_thread.start()
for i in range(5):
time.sleep(0.5)
print('Producer releasing resource {} of 5...'.format(i + 1))
semaphore.release()
print('Producer quitting!')
semaphore.cancel()
consumer_thread.join()
print('Consumer joined!')
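# A minimal sketch (an assumption about the semantics the test relies on,
# not the phyllo implementation) of what CancellableSemaphore provides:
# acquire() blocks until release() or cancel(); after cancel(), every
# acquire() returns False so consumer loops can exit cleanly.
import threading

class _SketchCancellableSemaphore:
    def __init__(self):
        self._cond = threading.Condition()
        self._count = 0
        self._cancelled = False

    def acquire(self):
        with self._cond:
            while self._count == 0 and not self._cancelled:
                self._cond.wait()
            if self._cancelled:
                return False
            self._count -= 1
            return True

    def release(self):
        with self._cond:
            self._count += 1
            self._cond.notify()

    def cancel(self):
        with self._cond:
            self._cancelled = True
            self._cond.notify_all()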
|
query_expressions.py
|
'''
Created on Mar 16, 2015
@author: brecht
'''
import abc
import sys
from multiprocessing import Event, Pipe, Process
from cassandra.cluster import Cluster
class Expression(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def evaluate(self, session, starting_set):
return
@abc.abstractmethod
def can_prune(self):
return True
class Basic_expression(Expression):
def __init__(self, from_table, select_column, where_clause):
self.table = from_table
self.select_column = select_column
self.where_clause = where_clause
def evaluate(self, socket, starting_set):
if len(starting_set) == 0:
return set()
query = "SELECT %s FROM %s" % \
(self.select_column, self.table)
if self.where_clause != "":
query += " WHERE %s" % self.where_clause
'''if self.can_prune() and not starting_set == "*":
if self.table.startswith('samples'):
in_clause = "','".join(starting_set)
query += " AND %s IN ('%s')" % \
(self.select_column, in_clause)
else:
in_clause = ",".join(map(str, starting_set))
query += " AND %s IN (%s)" % \
(self.select_column, in_clause) '''
return async_rows_as_set(socket, query)
def can_prune(self):
return not any(op in self.where_clause for op in ["<", ">"])
def __str__(self):
return self.where_clause
class AND_expression(Expression):
def __init__(self, left, right):
self.left = left
self.right = right
def evaluate(self, session, starting_set):
if len(starting_set) == 0:
return set()
'''if self.right.can_prune():
temp = self.left.evaluate(session, starting_set)
return self.right.evaluate(session, temp)
elif self.left.can_prune():
temp = self.right.evaluate(session, starting_set)
return self.left.evaluate(session, temp)
else:
temp = self.left.evaluate(session, starting_set)
return temp & self.right.evaluate(session, temp)'''
temp = self.left.evaluate(session, starting_set)
return temp & self.right.evaluate(session, temp)
def __str__(self):
res = "(" + str(self.left) + ")" + " AND " + "(" + str(self.right) + ")"
return res
def can_prune(self):
return True
class OR_expression(Expression):
def __init__(self, left, right):
self.left = left
self.right = right
def evaluate(self, session, starting_set):
if len(starting_set) == 0:
return set()
return (self.left.evaluate(session, starting_set) | self.right.evaluate(session, starting_set))
def __str__(self):
res = "(" + str(self.left) + ")" + " OR " + "(" + str(self.right) + ")"
return res
def can_prune(self):
return True
class NOT_expression(Expression):
def __init__(self, exp, table, select_column, total_nr_variants):
self.body = exp
self.table = table
self.select_column = select_column
self.total_nr_variants = total_nr_variants
def evaluate(self, session, starting_set):
if len(starting_set) == 0:
return set()
elif starting_set == '*':
if self.table == 'variants':
correct_starting_set = set(range(1, self.total_nr_variants+1))
else:
correct_starting_set = async_rows_as_set(session, "SELECT %s FROM %s" % (self.select_column, self.table))
else:
correct_starting_set = starting_set
'''if self.table == 'variants' and starting_set == "*":
return correct_starting_set - self.body.evaluate(session, "*")
else:
return correct_starting_set - \
self.body.evaluate(session, correct_starting_set)'''
return correct_starting_set - \
self.body.evaluate(session, correct_starting_set)
def __str__(self):
return "NOT (" + str(self.body) + ")"
def can_prune(self):
return True
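# Illustrative composition of the expression classes above (table, column and
# WHERE clauses are made up; no Cassandra session is needed just to build the
# tree and render it):
_example_expr = AND_expression(
    Basic_expression('variants', 'variant_id', "chrom = 'chr1'"),
    NOT_expression(Basic_expression('variants', 'variant_id', "qual < 30"),
                   'variants', 'variant_id', 1000))
assert str(_example_expr) == "(chrom = 'chr1') AND (NOT (qual < 30))"
# Evaluating it against a live cluster would look roughly like:
#   session = Cluster(['127.0.0.1']).connect('my_keyspace')
#   matching_ids = _example_expr.evaluate(session, '*')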
class GT_wildcard_expression(Expression):
def __init__(self, column, wildcard_rule, rule_enforcement, sample_names, db_contact_points, keyspace, n_variants, cores_for_eval = 1):
self.column = column
self.wildcard_rule = wildcard_rule
if rule_enforcement.startswith('count'):
self.rule_enforcement = 'count'
self.count_comp = rule_enforcement[5:].strip()
else:
self.rule_enforcement = rule_enforcement
self.names = sample_names
self.nr_cores = cores_for_eval
self.db_contact_points = db_contact_points
self.keyspace = keyspace
self.n_variants = n_variants
def __str__(self):
return "[%s].[%s].[%s].[%s]" % (self.column, ','.join(self.names), self.wildcard_rule, self.rule_enforcement)
def can_prune(self):
return True
def evaluate(self, session, starting_set):
# integer division so the chunk arithmetic below yields valid slice bounds
step = len(self.names) // self.nr_cores
procs = []
conns = []
results = []
invert = False
invert_count = False
if self.wildcard_rule.startswith('!'):
corrected_rule = self.wildcard_rule[1:]
if self.rule_enforcement == 'all':
target_rule = 'any'
invert = True
elif self.rule_enforcement == 'any':
target_rule = 'all'
invert = True
elif self.rule_enforcement == 'none':
target_rule = 'all'
elif self.rule_enforcement.startswith('count'):
target_rule = 'count'
invert_count = True
else:
target_rule = self.rule_enforcement
corrected_rule = self.wildcard_rule
if starting_set == "*":
correct_starting_set = range(1,self.n_variants+1)
else:
correct_starting_set = starting_set
for i in range(self.nr_cores):
parent_conn, child_conn = Pipe()
conns.append(parent_conn)
p = Process(target=eval(target_rule +'_query'),\
args=(child_conn, self.column, corrected_rule, self.db_contact_points, self.keyspace))
procs.append(p)
p.start()
# Split the names into chunks and send one chunk to each worker process
for i in range(self.nr_cores):
n = len(self.names)
begin = i*step + min(i, n % self.nr_cores)
end = begin + step
if i < n % self.nr_cores:
end += 1
conns[i].send(self.names[begin:end])
conns[i].send(correct_starting_set)
#Collect results
for i in range(self.nr_cores):
results.append(conns[i].recv())
conns[i].close()
for i in range(self.nr_cores):
procs[i].join()
res = set()
if target_rule == 'any':
for r in results:
res = res | r
elif target_rule in ['all', 'none']:
res = results[0]
for r in results[1:]:
res = res & r
if invert:
res = set(correct_starting_set) - res
if target_rule == 'count':
res_dict = {x: 0 for x in correct_starting_set}
for sub_result_dict in results:
for var, count in sub_result_dict.iteritems():
res_dict[var] += count
if invert_count:
total = len(self.names)
for variant, count in res_dict.iteritems():
res_dict[variant] = total - count
res = set([variant for variant, count in res_dict.iteritems() \
if eval(str(count) + self.count_comp)])
return res
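# Standalone sketch of the chunking arithmetic used in evaluate() above
# (sizes are hypothetical): 10 names over 4 workers gives chunk sizes
# 3, 3, 2, 2, and the chunks cover every name exactly once.
_names = list(range(10))
_cores = 4
_step = len(_names) // _cores
_chunks = []
for _i in range(_cores):
    _begin = _i * _step + min(_i, len(_names) % _cores)
    _end = _begin + _step + (1 if _i < len(_names) % _cores else 0)
    _chunks.append(_names[_begin:_end])
assert [len(c) for c in _chunks] == [3, 3, 2, 2]
assert sum(_chunks, []) == _names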
def all_query(conn, field, clause, contact_points, keyspace):
cluster = Cluster(contact_points)
session = cluster.connect(keyspace)
names = conn.recv()
initial_set = conn.recv()
results = set(initial_set)
for name in names:
if len(results) == 0:
break
query = "SELECT variant_id FROM variants_by_samples_%s WHERE sample_name = '%s' AND %s %s " % (field, name, field, clause)
results = async_rows_as_set(session, query) & results
session.shutdown()
conn.send(results)
conn.close()
def any_query(conn, field, clause, contact_points, keyspace):
cluster = Cluster(contact_points)
session = cluster.connect(keyspace)
names = conn.recv()
initial_set = set(conn.recv())
results = set()
for name in names:
query = "SELECT variant_id FROM variants_by_samples_%s WHERE sample_name = '%s' AND %s %s " % (field, name, field, clause)
row = async_rows_as_set(session, query)
results = row | results
session.shutdown()
results = initial_set & results
conn.send(results)
conn.close()
def none_query(conn, field, clause, contact_points, keyspace):
cluster = Cluster(contact_points)
session = cluster.connect(keyspace)
names = conn.recv()
initial_set = conn.recv()
results = set(initial_set)
for name in names:
query = "SELECT variant_id FROM variants_by_samples_%s WHERE sample_name = '%s' AND %s %s " % (field, name, field, clause)
variants = async_rows_as_set(session, query)
results = results - variants
session.shutdown()
conn.send(results)
conn.close()
def count_query(conn, field, clause, contact_points, keyspace):
cluster = Cluster(contact_points)
session = cluster.connect(keyspace)
names = conn.recv()
initial_set = set(conn.recv())
results = dict()
for name in names:
query = '''SELECT variant_id FROM variants_by_samples_%s \
WHERE sample_name = '%s' AND %s %s ''' % (field, name, field, clause)
variants = initial_set & async_rows_as_set(session, query)
results = add_row_to_count_dict(results, variants)
session.shutdown()
conn.send(results)
conn.close()
def add_row_to_count_dict(res_dict, variants):
for var in variants:
if var not in res_dict:
res_dict[var] = 1
else:
res_dict[var] += 1
return res_dict
def async_rows_as_set(session, query):
future = session.execute_async(query)
handler = PagedResultHandler(future)
handler.finished_event.wait()
if handler.error:
sys.stderr.write("Query failed: %s\n" % query)
raise handler.error
else:
return handler.res
class PagedResultHandler(object):
def __init__(self, future):
self.error = None
self.finished_event = Event()
self.future = future
self.future.add_callbacks(
callback=self.handle_page,
errback=self.handle_error)
self.res = set()
def handle_page(self, results):
for row in results:
self.res.add(row[0])
if self.future.has_more_pages:
self.future.start_fetching_next_page()
else:
self.finished_event.set()
def handle_error(self, exc):
self.error = exc
self.finished_event.set()
|
pn_initial_ztp_withthread.py
|
#!/usr/bin/python
""" PN CLI Zero Touch Provisioning (ZTP) """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.basic import AnsibleModule
import shlex
import time
import threading
from Queue import Queue
from multiprocessing import Process
DOCUMENTATION = """
---
module: pn_initial_ztp
author: 'Pluribus Networks (devops@pluribusnetworks.com)'
short_description: CLI command to do zero touch provisioning.
description:
Zero Touch Provisioning (ZTP) allows you to provision new switches in your
network automatically, without manual intervention.
It performs the following steps:
- Accept EULA
- Disable STP
- Enable all ports
- Create/Join fabric
- Enable STP
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
type: str
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
type: str
pn_fabric_name:
description:
- Specify name of the fabric.
required: False
type: str
pn_fabric_network:
description:
- Specify fabric network type as either mgmt or in-band.
required: False
type: str
choices: ['mgmt', 'in-band']
default: 'mgmt'
pn_fabric_control_network:
description:
- Specify fabric control network as either mgmt or in-band.
required: False
type: str
choices: ['mgmt', 'in-band']
default: 'mgmt'
pn_toggle_40g:
description:
- Flag to indicate if 40g ports should be converted to 10g ports or not.
required: False
default: True
type: bool
pn_inband_ip:
description:
- In-band IPs to be assigned to switches, starting with this value.
required: False
default: 172.16.0.0/24
type: str
pn_current_switch:
description:
- Name of the switch on which this task is currently being executed.
required: False
type: str
pn_static_setup:
description:
- Flag to indicate if static values should be assigned to the
following switch setup parameters.
required: False
default: False
type: bool
pn_mgmt_ip:
description:
- Specify MGMT-IP value to be assigned if pn_static_setup is True.
required: False
type: str
pn_mgmt_ip_subnet:
description:
- Specify subnet mask for MGMT-IP value to be assigned if
pn_static_setup is True.
required: False
type: str
pn_gateway_ip:
description:
- Specify GATEWAY-IP value to be assigned if pn_static_setup is True.
required: False
type: str
pn_dns_ip:
description:
- Specify DNS-IP value to be assigned if pn_static_setup is True.
required: False
type: str
pn_dns_secondary_ip:
description:
- Specify DNS-SECONDARY-IP value to be assigned if pn_static_setup is True.
required: False
type: str
pn_domain_name:
description:
- Specify DOMAIN-NAME value to be assigned if pn_static_setup is True.
required: False
type: str
pn_ntp_server:
description:
- Specify NTP-SERVER value to be assigned if pn_static_setup is True.
required: False
type: str
pn_web_api:
description:
- Flag to enable web api.
default: True
type: bool
pn_stp:
description:
- Flag to enable STP at the end.
required: False
default: False
type: bool
"""
EXAMPLES = """
- name: Auto accept EULA, Disable STP, enable ports and create/join fabric
pn_initial_ztp:
pn_cliusername: "{{ USERNAME }}"
pn_clipassword: "{{ PASSWORD }}"
pn_fabric_name: 'ztp-fabric'
pn_current_switch: "{{ inventory_hostname }}"
"""
RETURN = """
stdout:
description: The set of responses for each command.
returned: always
type: str
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
failed:
description: Indicates whether or not the execution failed on the target.
returned: always
type: bool
"""
CHANGED_FLAG = []
def pn_cli(module):
"""
Method to generate the cli portion to launch the Netvisor cli.
:param module: The Ansible module to fetch username and password.
:return: The cli string for further processing.
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
return cli
def run_cli(module, cli):
"""
Method to execute the cli command on the target node(s) and return the
output.
:param module: The Ansible module to fetch input parameters.
:param cli: The complete cli string to be executed on the target node(s).
:return: Output/Error or Success msg depending upon the response from cli.
"""
cli = shlex.split(cli)
rc, out, err = module.run_command(cli)
if out:
return out
if err:
module.exit_json(
error='1',
failed=True,
stderr=err.strip(),
msg='Operation Failed: ' + str(cli),
changed=False
)
else:
return 'Success'
def auto_accept_eula(module):
"""
Method to accept the EULA when we first login to a new switch.
:param module: The Ansible module to fetch input parameters.
:return: The output of run_cli() method.
"""
password = module.params['pn_clipassword']
cli = ' /usr/bin/cli --quiet --skip-setup eula-show '
cli = shlex.split(cli)
rc, out, err = module.run_command(cli)
if err:
cli = '/usr/bin/cli --quiet'
cli += ' --skip-setup --script-password '
cli += ' switch-setup-modify password ' + password
cli += ' eula-accepted true '
f= open('output.txt', 'a')
f.write("accepted eula \n")
f.close()
return run_cli(module, cli)
elif out:
f= open('output.txt', 'a')
f.write("already accepted eula \n")
f.close()
return ' EULA has been accepted already '
def update_switch_names(module, switch_name):
"""
Method to update switch names.
:param module: The Ansible module to fetch input parameters.
:param switch_name: Name to assign to the switch.
:return: String describing switch name got modified or not.
"""
cli = pn_cli(module)
cli += ' switch-setup-show format switch-name '
if switch_name in run_cli(module, cli).split()[1]:
f= open('output.txt', 'a')
f.write("already updated switch names \n")
f.close()
return ' Switch name is same as hostname! '
else:
cli = pn_cli(module)
cli += ' switch-setup-modify switch-name ' + switch_name
run_cli(module, cli)
f= open('output.txt', 'a')
f.write("updated switch names \n")
f.close()
return ' Updated switch name to match hostname! '
def make_switch_setup_static(module):
"""
Method to assign static values to different switch setup parameters.
:param module: The Ansible module to fetch input parameters.
"""
mgmt_ip = module.params['pn_mgmt_ip']
mgmt_ip_subnet = module.params['pn_mgmt_ip_subnet']
gateway_ip = module.params['pn_gateway_ip']
dns_ip = module.params['pn_dns_ip']
dns_secondary_ip = module.params['pn_dns_secondary_ip']
domain_name = module.params['pn_domain_name']
ntp_server = module.params['pn_ntp_server']
cli = pn_cli(module)
cli += ' switch-setup-modify '
if mgmt_ip:
ip = mgmt_ip + '/' + mgmt_ip_subnet
cli += ' mgmt-ip ' + ip
if gateway_ip:
cli += ' gateway-ip ' + gateway_ip
if dns_ip:
cli += ' dns-ip ' + dns_ip
if dns_secondary_ip:
cli += ' dns-secondary-ip ' + dns_secondary_ip
if domain_name:
cli += ' domain-name ' + domain_name
if ntp_server:
cli += ' ntp-server ' + ntp_server
clicopy = cli
f= open('output.txt', 'a')
f.write("already updated switch setup \n")
f.close()
if clicopy.split('switch-setup-modify')[1] != ' ':
run_cli(module, cli)
def modify_stp_local(module, modify_flag):
"""
Method to enable/disable STP (Spanning Tree Protocol) on a switch.
:param module: The Ansible module to fetch input parameters.
:param modify_flag: Enable/disable flag to set.
:return: The output of run_cli() method.
"""
cli = pn_cli(module)
cli += ' switch-local stp-show format enable '
current_state = run_cli(module, cli).split()[1]
if current_state == 'yes':
cli = pn_cli(module)
cli += ' switch-local stp-modify ' + modify_flag
f= open('output.txt', 'a')
f.write(" stp \n")
f.close()
return run_cli(module, cli)
else:
f= open('output.txt', 'a')
f.write("already stp \n")
f.close()
return ' Already modified '
def configure_control_network(module, network):
"""
Method to configure the fabric control network.
:param module: The Ansible module to fetch input parameters.
:param network: It can be in-band or management.
:return: The output of run_cli() method.
"""
cli = pn_cli(module)
cli += ' fabric-info format control-network '
current_control_network = run_cli(module, cli).split()[1]
if current_control_network != network:
cli = pn_cli(module)
cli += ' fabric-local-modify control-network ' + network
f= open('output.txt', 'a')
f.write("control network \n")
f.close()
return run_cli(module, cli)
else:
f= open('output.txt', 'a')
f.write("already control network \n")
f.close()
return ' Already configured '
def enable_ports(module):
"""
Method to enable all ports of a switch.
:param module: The Ansible module to fetch input parameters.
:return: The output of run_cli() method.
"""
cli = pn_cli(module)
clicopy = cli
cli += ' port-config-show format port no-show-headers '
out = run_cli(module, cli)
cli = clicopy
cli += ' port-config-show format port speed 40g no-show-headers '
out_40g = run_cli(module, cli)
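# Each 40g port also reserves the next three port numbers (its 10g lanes);
# collect those so they are excluded from the list of ports to enable below.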
out_remove10g = []
if len(out_40g) > 0 and out_40g != 'Success':
out_40g = out_40g.split()
out_40g = list(set(out_40g))
if len(out_40g) > 0:
for port_number in out_40g:
out_remove10g.append(str(int(port_number) + 1))
out_remove10g.append(str(int(port_number) + 2))
out_remove10g.append(str(int(port_number) + 3))
if out:
out = out.split()
out = set(out) - set(out_remove10g)
out = list(out)
if out:
ports = ','.join(out)
cli = clicopy
cli += ' port-config-modify port %s enable ' % ports
with open('output.txt', 'a') as f:
    f.write("enable port \n")
return run_cli(module, cli)
else:
return out
def create_or_join_fabric(module, fabric_name, fabric_network):
"""
Method to create/join a fabric with default fabric type as mgmt.
:param module: The Ansible module to fetch input parameters.
:param fabric_name: Name of the fabric to create/join.
:param fabric_network: Type of the fabric to create (mgmt/in-band).
Default value: mgmt
:return: The output of run_cli() method.
"""
cli = pn_cli(module)
clicopy = cli
cli += ' fabric-show format name no-show-headers '
existing_fabrics = run_cli(module, cli).split()
if fabric_name not in existing_fabrics:
cli = clicopy
cli += ' fabric-create name ' + fabric_name
cli += ' fabric-network ' + fabric_network
else:
cli = clicopy
cli += ' fabric-info format name no-show-headers'
cli = shlex.split(cli)
rc, out, err = module.run_command(cli)
if err:
cli = clicopy
cli += ' fabric-join name ' + fabric_name
elif out:
present_fabric_name = out.split()
if present_fabric_name[1] not in existing_fabrics:
cli = clicopy
cli += ' fabric-join name ' + fabric_name
else:
return 'Switch already in the fabric'
with open('output.txt', 'a') as f:
    f.write("already fabric \n")
with open('output.txt', 'a') as f:
    f.write("fabric \n")
return run_cli(module, cli)
def enable_web_api(module):
"""
Method to enable web api on switches.
:param module: The Ansible module to fetch input parameters.
"""
cli = pn_cli(module)
cli += ' admin-service-modify web if mgmt '
with open('output.txt', 'a') as f:
    f.write("web api \n")
run_cli(module, cli)
def toggle_40g_local(module):
"""
Method to toggle 40g ports to 10g ports.
:param module: The Ansible module to fetch input parameters.
:return: The output messages for assignment.
"""
output = ''
cli = pn_cli(module)
clicopy = cli
cli += ' switch-local lldp-show format local-port no-show-headers '
local_ports = run_cli(module, cli).split()
cli = clicopy
cli += ' switch-local port-config-show speed 40g '
cli += ' format port no-show-headers '
ports_40g = run_cli(module, cli)
if len(ports_40g) > 0 and ports_40g != 'Success':
ports_40g = ports_40g.split()
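# Only convert 40g ports that have no LLDP neighbour, i.e. are not already
# in use as 40g links (local_ports comes from the lldp-show output above).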
ports_to_modify = list(set(ports_40g) - set(local_ports))
for port in ports_to_modify:
next_port = str(int(port) + 1)
cli = clicopy
cli += ' switch-local'
cli += ' port-show port %s format bezel-port' % next_port
cli += ' no-show-headers'
bezel_port = run_cli(module, cli).split()[0]
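# If the next port's bezel name contains '.2', it is a sub-port of the same
# physical connector, so this 40g port is split into four 10g ports (port..port+3).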
if '.2' in bezel_port:
end_port = int(port) + 3
range_port = port + '-' + str(end_port)
cli = clicopy
cli += ' switch-local port-config-modify port %s ' % port
cli += ' disable '
output += 'port ' + port + ' disabled'
output += run_cli(module, cli)
cli = clicopy
cli += ' switch-local port-config-modify port %s ' % port
cli += ' speed 10g '
output += 'port ' + port + ' converted to 10g'
output += run_cli(module, cli)
cli = clicopy
cli += ' switch-local port-config-modify port %s ' % range_port
cli += ' enable '
output += 'port range ' + range_port + ' enabled'
output += run_cli(module, cli)
with open('output.txt', 'a') as f:
    f.write("toggle \n")
time.sleep(10)
return output
def assign_inband_ip(module, inband_address):
"""
Method to assign in-band ips to switches.
:param module: The Ansible module to fetch input parameters.
:param inband_address: The network ip for the in-band ips.
:return: Assigned inband ip or None.
"""
address = inband_address.split('.')
static_part = str(address[0]) + '.' + str(address[1]) + '.'
static_part += str(address[2]) + '.'
last_octet = str(address[3]).split('/')
subnet = last_octet[1]
cli = pn_cli(module)
clicopy = cli
ip_count = 1
ip = static_part + str(ip_count) + '/' + subnet
# Get existing in-band ip.
cli += ' switch-local switch-setup-show format in-band-ip '
existing_inband_ip = run_cli(module, cli)
# If existing in-band ip is not the same then assign new ip.
if ip not in existing_inband_ip:
cli = clicopy
cli += ' fabric-node-show format in-band-ip '
cli += ' no-show-headers '
assigned_ips = run_cli(module, cli).split()
# Make sure ip has not been assigned to any of the switches.
while ip in assigned_ips:
# If ip is not unique, increase the ip count by 1.
ip_count += 1
ip = static_part + str(ip_count) + '/' + subnet
if ip in existing_inband_ip:
return None
with open('output.txt', 'a') as f:
    f.write("inband \n")
# Assign unique in-band ip to the switch.
cli = clicopy
cli += ' switch-local switch-setup-modify '
cli += ' in-band-ip ' + ip
if 'Setup completed successfully' in run_cli(module, cli):
return ip
return None
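# Editor's sketch (not part of the original module): the uniqueness loop from
# assign_inband_ip() shown in isolation with hypothetical inputs. The function
# name and its arguments are illustrative only.
def _next_free_inband_ip(inband_address, assigned_ips):
    """Return the first <network>.N/<subnet> address not present in assigned_ips."""
    network, subnet = inband_address.rsplit('/', 1)
    static_part = network.rsplit('.', 1)[0] + '.'
    count = 1
    ip = static_part + str(count) + '/' + subnet
    while ip in assigned_ips:
        count += 1
        ip = static_part + str(count) + '/' + subnet
    return ip
# Example: _next_free_inband_ip('172.16.0.0/24', ['172.16.0.1/24']) -> '172.16.0.2/24'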
def do_stuff(q):
while True:
print(q.get())
q.task_done()
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_fabric_name=dict(required=True, type='str'),
pn_fabric_network=dict(required=False, type='str',
choices=['mgmt', 'in-band'],
default='mgmt'),
pn_fabric_control_network=dict(required=False, type='str',
choices=['mgmt', 'in-band'],
default='mgmt'),
pn_toggle_40g=dict(required=False, type='bool', default=True),
pn_inband_ip=dict(required=False, type='str',
default='172.16.0.0/24'),
pn_current_switch=dict(required=False, type='str'),
pn_static_setup=dict(required=False, type='bool', default=False),
pn_mgmt_ip=dict(required=False, type='str'),
pn_mgmt_ip_subnet=dict(required=False, type='str'),
pn_gateway_ip=dict(required=False, type='str'),
pn_dns_ip=dict(required=False, type='str'),
pn_dns_secondary_ip=dict(required=False, type='str'),
pn_domain_name=dict(required=False, type='str'),
pn_ntp_server=dict(required=False, type='str'),
pn_web_api=dict(type='bool', default=True),
pn_stp=dict(required=False, type='bool', default=False),
)
)
fabric_name = module.params['pn_fabric_name']
fabric_network = module.params['pn_fabric_network']
control_network = module.params['pn_fabric_control_network']
toggle_40g_flag = module.params['pn_toggle_40g']
current_switch = module.params['pn_current_switch']
message = ''
global CHANGED_FLAG
CHANGED_FLAG = []
list_threads = []
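# Each remaining setup step is wrapped in a separate Process, collected in
# list_threads, and started/joined together further down.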
# Auto accept EULA
if 'Setup completed successfully' in auto_accept_eula(module):
message += ' %s: EULA accepted \n' % current_switch
CHANGED_FLAG.append(True)
else:
message += ' %s: EULA has already been accepted \n' % current_switch
if 'already in the fabric' in create_or_join_fabric(module, fabric_name,
fabric_network):
message += ' %s: Already a part of fabric %s \n' % (current_switch,
fabric_name)
else:
message += ' %s: Joined fabric %s \n' % (current_switch,
fabric_name)
CHANGED_FLAG.append(True)
# Update switch names to match host names from hosts file
t = Process(target=update_switch_names, args=(module, current_switch))
list_threads.append(t)
# Make switch setup static
t = Process(target=make_switch_setup_static, args=(module,))
list_threads.append(t)
# Create/join fabric
t = Process(target=create_or_join_fabric, args=(module, fabric_name, fabric_network))
list_threads.append(t)
# Configure fabric control network to either mgmt or in-band
t = Process(target=configure_control_network, args=(module, control_network))
list_threads.append(t)
# Enable web api if flag is True
t = Process(target=enable_web_api, args=(module,))
list_threads.append(t)
# Disable STP
t = Process(target=modify_stp_local, args=(module, 'disable'))
list_threads.append(t)
# Enable ports
t = Process(target=enable_ports, args=(module,))
list_threads.append(t)
# Assign in-band ips.
t = Process(target=assign_inband_ip, args=(module, module.params['pn_inband_ip']))
list_threads.append(t)
# Enable STP if flag is True
# t=Process(target = modify_stp_local,args = (module, 'enable', ))
# list_threads.append(t)
for t in list_threads:
t.start()
# for i in range(9):
# list_threads.get().start()
#t.start()
#list_threads.task_done()
#for i in range(8):
# list_threads.get().join()
for t in list_threads:
t.join()
'''
q = Queue()
#q.put(update_switch_names(module, current_switch))
#q.put(assign_inband_ip(module, module.params['pn_inband_ip']))
#q.put(make_switch_setup_static(module))
#q.put(update_switch_names(module, current_switch))
#q.put(configure_control_network(module, control_network))
#q.put(enable_web_api(module))
q.put(modify_stp_local(module, 'disable'))
#q.put(enable_ports(module))
# Exit the module and return the required JSON
# Toggle 40g ports to 10g
for i in range(8):
worker = Thread(target=do_stuff, args=(q,))
# worker.setDaemon(True)
worker.start()
q.join()
'''
if toggle_40g_flag:
if toggle_40g_local(module):
message += ' %s: Toggled 40G ports to 10G \n' % current_switch
CHANGED_FLAG.append(True)
module.exit_json(
stdout=message,
error='0',
failed=False,
changed=True if True in CHANGED_FLAG else False
)
if __name__ == '__main__':
main()
|
test_cpp_tcp_client.py
|
import multiprocessing as mp
from pathlib import Path
import subprocess
from unittest.mock import MagicMock
import msgpack
from libmuscle.mcp.tcp_server import TcpServer
from libmuscle.mcp.message import Message
from libmuscle.post_office import PostOffice
from ymmsl import Reference, Settings
def tcp_server_process(control_pipe):
control_pipe[0].close()
settings = Settings({'test_setting': 42})
data = {'test1': 10, 'test2': [None, True, 'testing']}
receiver = Reference('test_receiver.test_port2')
message = Message(
Reference('test_sender.test_port'),
receiver,
10, 1.0, 2.0, settings, data).encoded()
def get_message(receiver):
assert receiver == 'test_receiver.test_port2'
return message
post_office = MagicMock()
post_office.done = False
post_office.get_message = get_message
sender_instance_id = Reference('test_sender')
server = TcpServer(sender_instance_id, post_office)
control_pipe[1].send(server.get_location())
control_pipe[1].recv()
control_pipe[1].close()
server.close()
def test_cpp_tcp_client(log_file_in_tmpdir):
# create server process
server_pipe = mp.Pipe()
server_process = mp.Process(target=tcp_server_process, args=(server_pipe,))
server_process.start()
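# The child uses pipe end [1]; close the parent's copy of it so each process
# holds only its own end, then read the server location the child sends.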
server_pipe[1].close()
server_loc = server_pipe[0].recv()
# create C++ client
# it receives and checks settings, and sends a log message
# see libmuscle/cpp/src/libmuscle/tests/mmp_client_test.cpp
cpp_build_dir = Path(__file__).parents[1] / 'libmuscle' / 'cpp' / 'build'
lib_paths = [
cpp_build_dir / 'grpc' / 'c-ares' / 'c-ares' / 'lib',
cpp_build_dir / 'grpc' / 'zlib' / 'zlib' / 'lib',
cpp_build_dir / 'grpc' / 'openssl' / 'openssl' / 'lib',
cpp_build_dir / 'protobuf' / 'protobuf' / 'lib',
cpp_build_dir / 'grpc' / 'grpc' / 'lib',
cpp_build_dir / 'msgpack' / 'msgpack' / 'lib']
env = {
'LD_LIBRARY_PATH': ':'.join(map(str, lib_paths))}
cpp_test_dir = cpp_build_dir / 'libmuscle' / 'tests'
cpp_test_client = cpp_test_dir / 'tcp_client_test'
result = subprocess.run([str(cpp_test_client), server_loc], env=env)
server_pipe[0].send(None)
server_pipe[0].close()
server_process.join()
assert result.returncode == 0
assert server_process.exitcode == 0
|
protocol_astrohaven_simulator.py
|
import datetime
import queue
from serial import serialutil
import threading
import time
from panoptes.pocs.dome import astrohaven
from panoptes.utils import serial_handlers
from panoptes.pocs.utils.logger import get_logger
Protocol = astrohaven.Protocol
CLOSED_POSITION = 0
NUDGE_OPEN_INCREMENT = 1
NUDGE_CLOSED_INCREMENT = -1
OPEN_POSITION = 10
def _drain_queue(q):
cmd = None
while not q.empty():
cmd = q.get_nowait()
return cmd # Present just for debugging.
class Shutter(object):
"""Represents one side of the clamshell dome."""
def __init__(self, side, open_command, close_command, is_open_char, is_closed_char, logger):
self.side = side
self.open_commands = [open_command, Protocol.OPEN_BOTH]
self.close_commands = [close_command, Protocol.CLOSE_BOTH]
self.is_open_char = is_open_char
self.is_closed_char = is_closed_char
self.logger = logger
self.position = CLOSED_POSITION
self.min_position = min(CLOSED_POSITION, OPEN_POSITION)
self.max_position = max(CLOSED_POSITION, OPEN_POSITION)
def handle_input(self, input_char):
if input_char in self.open_commands:
if self.is_open:
return (False, self.is_open_char)
self.logger.debug(f'Opening side {self.side}, starting position {self.position}')
self.adjust_position(NUDGE_OPEN_INCREMENT)
if self.is_open:
self.logger.debug(f'Opened side {self.side}')
return (True, self.is_open_char)
return (True, input_char)
elif input_char in self.close_commands:
if self.is_closed:
return (False, self.is_closed_char)
self.logger.debug(f'Closing side {self.side}, starting position {self.position}')
self.adjust_position(NUDGE_CLOSED_INCREMENT)
if self.is_closed:
self.logger.debug(f'Closed side {self.side}')
return (True, self.is_closed_char)
return (True, input_char)
else:
return (False, None)
def adjust_position(self, nudge_by):
new_position = self.position + nudge_by
self.position = min(self.max_position, max(self.min_position, new_position))
@property
def is_open(self):
return self.position == OPEN_POSITION
@property
def is_closed(self):
return self.position == CLOSED_POSITION
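# Editor's illustration (not used anywhere in this module): how Shutter's nudge
# mechanics behave. A plain logging.Logger stands in for the POCS logger that
# the real simulator passes in.
def _demo_shutter():
    import logging
    shutter = Shutter('A', Protocol.OPEN_A, Protocol.CLOSE_A,
                      Protocol.A_OPEN_LIMIT, Protocol.A_CLOSE_LIMIT,
                      logging.getLogger('shutter-demo'))
    while not shutter.is_open:
        # Each open command nudges the position by NUDGE_OPEN_INCREMENT.
        shutter.handle_input(Protocol.OPEN_A)
    # Once fully open, further open commands just report the limit character.
    assert shutter.handle_input(Protocol.OPEN_A) == (False, Protocol.A_OPEN_LIMIT)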
class AstrohavenPLCSimulator:
"""Simulates the behavior of the Vision 130 PLC in an Astrohaven clamshell dome.
The RS-232 connection is simulated with an input queue of bytes (one character strings,
really) and an output queue of bytes (also 1 char strings).
This class provides a run function which can be called from a Thread to execute.
"""
def __init__(self, command_queue, status_queue, stop, logger):
"""
Args:
command_queue: The queue.Queue instance from which command bytes are read one at a time
and acted upon.
status_queue: The queue.Queue instance to which bytes are written one at a time
(approximately once a second) to report the state of the dome or the response
to a command byte.
stop: a threading.Event which is checked to see if run should stop executing.
"""
self.command_queue = command_queue
self.status_queue = status_queue
self.stop = stop
self.logger = logger
self.delta = datetime.timedelta(seconds=1)
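# delta sets the cadence of status output; per the class docstring, the real
# PLC reports its state roughly once per second.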
self.shutter_a = Shutter('A', Protocol.OPEN_A, Protocol.CLOSE_A, Protocol.A_OPEN_LIMIT,
Protocol.A_CLOSE_LIMIT, self.logger)
self.shutter_b = Shutter('B', Protocol.OPEN_B, Protocol.CLOSE_B, Protocol.B_OPEN_LIMIT,
Protocol.B_CLOSE_LIMIT, self.logger)
self.next_output_code = None
self.next_output_time = None
self.logger.info('AstrohavenPLCSimulator created')
def __del__(self):
if not self.stop.is_set():
self.logger.critical('AstrohavenPLCSimulator.__del__ stop is NOT set')
def run(self):
self.logger.info('AstrohavenPLCSimulator.run ENTER')
self.next_output_time = datetime.datetime.now()
while True:
if self.stop.is_set():
self.logger.info('Returning from AstrohavenPLCSimulator.run EXIT')
return
now = datetime.datetime.now()
remaining = (self.next_output_time - now).total_seconds()
self.logger.info(f'AstrohavenPLCSimulator.run remaining={remaining}')
if remaining <= 0:
self.do_output()
continue
try:
c = self.command_queue.get(block=True, timeout=remaining)
except queue.Empty:
continue
if self.handle_input(c):
# This wait is here to reflect the fact that responses from the Astrohaven PLC
# don't appear to be instantaneous, and the Wheaton-originated driver had pauses
# and drains of input from the PLC before accepting a response.
time.sleep(0.2)
# Ignore accumulated input (i.e. assume that the PLC is ignoring/discarding input
# while it is performing a command). But do the draining before performing output
# so that if the driver responds immediately, we don't lose the next command.
_drain_queue(self.command_queue)
self.do_output()
def do_output(self):
c = self.next_output_code
if not c:
c = self.compute_state()
self.logger.debug('AstrohavenPLCSimulator.compute_state -> {!r}', c)
self.next_output_code = None
# We drop output if the queue is full.
if not self.status_queue.full():
self.status_queue.put(c, block=False)
self.next_output_time = datetime.datetime.now() + self.delta
def handle_input(self, c):
self.logger.debug('AstrohavenPLCSimulator.handle_input {!r}', c)
(a_acted, a_resp) = self.shutter_a.handle_input(c)
(b_acted, b_resp) = self.shutter_b.handle_input(c)
# Use a_resp if a_acted or if there is no b_resp
joint_resp = (a_acted and a_resp) or b_resp or a_resp
if not (a_acted or b_acted):
# Might nonetheless be a valid command request. If so, echo the limit response.
if joint_resp and not self.next_output_code:
self.next_output_code = joint_resp
return True
else:
return False
else:
# Replace the pending output (if any) with the output for this command.
self.next_output_code = joint_resp
return True
def compute_state(self):
# TODO(jamessynge): Validate that this is correct. In particular, if we start with both
# shutters closed, then nudge A open a bit, what is reported? Ditto with B only, and with
# both nudged open (but not fully open).
if self.shutter_a.is_closed:
if self.shutter_b.is_closed:
return Protocol.BOTH_CLOSED
else:
return Protocol.A_IS_CLOSED
elif self.shutter_b.is_closed:
return Protocol.B_IS_CLOSED
else:
return Protocol.BOTH_OPEN
class AstrohavenSerialSimulator(serial_handlers.NoOpSerial):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = get_logger()
self.plc_thread = None
self.command_queue = queue.Queue(maxsize=50)
self.status_queue = queue.Queue(maxsize=1000)
self.stop = threading.Event()
self.stop.set()
self.plc = AstrohavenPLCSimulator(self.command_queue, self.status_queue, self.stop,
self.logger)
def __del__(self):
if self.plc_thread:
self.logger.critical('AstrohavenPLCSimulator.__del__ plc_thread is still present')
self.stop.set()
self.plc_thread.join(timeout=3.0)
def open(self):
"""Open port.
Raises:
SerialException if the port cannot be opened.
"""
if not self.is_open:
self.is_open = True
self._reconfigure_port()
def close(self):
"""Close port immediately."""
self.is_open = False
self._reconfigure_port()
@property
def in_waiting(self):
"""The number of input bytes available to read immediately."""
if not self.is_open:
raise serialutil.portNotOpenError
return self.status_queue.qsize()
def reset_input_buffer(self):
"""Flush input buffer, discarding all it’s contents."""
_drain_queue(self.status_queue)
def read(self, size=1):
"""Read size bytes.
If a timeout is set it may return fewer characters than requested.
With no timeout it will block until the requested number of bytes
is read.
Args:
size: Number of bytes to read.
Returns:
Bytes read from the port, of type 'bytes'.
"""
if not self.is_open:
raise serialutil.portNotOpenError
# Not checking if the config is OK, so will try to read from a possibly
# empty queue if using the wrong baudrate, etc. This is deliberate.
response = bytearray()
timeout_obj = serialutil.Timeout(self.timeout)
while True:
b = self._read1(timeout_obj)
if b:
response += b
if size is not None and len(response) >= size:
break
else:
# The timeout expired while in _read1.
break
if timeout_obj.expired():
break
response = bytes(response)
self.logger.debug('AstrohavenSerialSimulator.read({}) -> {!r}', size, response)
return response
@property
def out_waiting(self):
"""The number of bytes in the output buffer."""
if not self.is_open:
raise serialutil.portNotOpenError
return self.command_queue.qsize()
def reset_output_buffer(self):
"""Clear output buffer.
Aborts the current output, discarding all that is in the output buffer.
"""
if not self.is_open:
raise serialutil.portNotOpenError
_drain_queue(self.command_queue)
def flush(self):
"""Write the buffered data to the output device.
We interpret that here as waiting until the PLC simulator has taken all of the
commands from the queue.
"""
if not self.is_open:
raise serialutil.portNotOpenError
while not self.command_queue.empty():
time.sleep(0.01)
def write(self, data):
"""Write the bytes data to the port.
Args:
data: The data to write (bytes or bytearray instance).
Returns:
Number of bytes written.
Raises:
SerialTimeoutException: In case a write timeout is configured for
the port and the time is exceeded.
"""
if not isinstance(data, (bytes, bytearray)):
raise ValueError("write takes bytes")
data = bytes(data) # Make sure it can't change.
self.logger.info('AstrohavenSerialSimulator.write({!r})', data)
count = 0
timeout_obj = serialutil.Timeout(self.write_timeout)
for b in data:
self._write1(b, timeout_obj)
count += 1
return count
# --------------------------------------------------------------------------
@property
def is_config_ok(self):
return (self.baudrate == 9600 and self.bytesize == serialutil.EIGHTBITS and
self.parity == serialutil.PARITY_NONE and not self.rtscts and not self.dsrdtr)
def _read1(self, timeout_obj):
if not self.is_open:
raise serialutil.portNotOpenError
try:
c = self.status_queue.get(block=True, timeout=timeout_obj.time_left())
assert isinstance(c, str)
assert len(c) == 1
b = c.encode(encoding='ascii')
assert len(b) == 1
return b
except queue.Empty:
return None
def _write1(self, b, timeout_obj):
if not self.is_open:
raise serialutil.portNotOpenError
try:
self.command_queue.put(chr(b), block=True, timeout=timeout_obj.time_left())
except queue.Full:
# This exception is "lossy" in that the caller can't tell how much was written.
raise serialutil.writeTimeoutError
# --------------------------------------------------------------------------
# There are a number of methods called by SerialBase that need to be
# implemented by sub-classes, assuming their calls haven't been blocked
# by replacing the calling methods/properties. These are no-op
# implementations.
def _reconfigure_port(self):
"""Reconfigure the open port after a property has been changed.
If you need to know which property has been changed, override the
setter for the appropriate properties.
"""
need_thread = self.is_open and self.is_config_ok
if need_thread and not self.plc_thread:
_drain_queue(self.command_queue)
_drain_queue(self.status_queue)
self.stop.clear()
self.plc_thread = threading.Thread(
name='Astrohaven PLC Simulator', target=lambda: self.plc.run(), daemon=True)
self.plc_thread.start()
elif self.plc_thread and not need_thread:
self.stop.set()
self.plc_thread.join(timeout=30.0)
if self.plc_thread.is_alive():
raise Exception(self.plc_thread.name + " thread did not stop!")
self.plc_thread = None
_drain_queue(self.command_queue)
_drain_queue(self.status_queue)
def _update_rts_state(self):
"""Handle rts being set to some value.
"self.rts = value" has been executed, for some value. This may not
have changed the value.
"""
pass
def _update_dtr_state(self):
"""Handle dtr being set to some value.
"self.dtr = value" has been executed, for some value. This may not
have changed the value.
"""
pass
def _update_break_state(self):
"""Handle break_condition being set to some value.
"self.break_condition = value" has been executed, for some value.
This may not have changed the value.
Note that break_condition is set and then cleared by send_break().
"""
pass
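# Expose the simulator under the module-level name `Serial`, which is what
# pyserial-style protocol/URL handlers look up in a handler module (assumed to
# be why this alias exists here).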
Serial = AstrohavenSerialSimulator
|